From c53d8e343e50d4cf7ea9a6a81258848c2d893bfb Mon Sep 17 00:00:00 2001 From: Jenkins Date: Thu, 3 May 2012 10:48:26 -0700 Subject: [PATCH] Initial fork out of Nova. --- .gitignore | 24 + .gitreview | 4 + .mailmap | 81 + Authors | 211 + HACKING.rst | 213 + LICENSE | 176 + MANIFEST.in | 37 + README.rst | 21 + babel.cfg | 2 + bin/cinder-all | 70 + bin/cinder-api | 47 + bin/cinder-manage | 635 + bin/cinder-rootwrap | 74 + bin/cinder-scheduler | 51 + bin/cinder-volume | 49 + bin/clear_rabbit_queues | 80 + cinder/__init__.py | 42 + cinder/api/__init__.py | 17 + cinder/api/auth.py | 103 + cinder/api/openstack/__init__.py | 143 + cinder/api/openstack/auth.py | 65 + cinder/api/openstack/common.py | 380 + cinder/api/openstack/compute/__init__.py | 23 + .../openstack/compute/schemas/atom-link.rng | 141 + .../compute/schemas/v1.1/extension.rng | 11 + .../compute/schemas/v1.1/extensions.rng | 6 + .../compute/schemas/v1.1/metadata.rng | 9 + cinder/api/openstack/compute/versions.py | 244 + .../api/openstack/compute/views/__init__.py | 0 .../api/openstack/compute/views/versions.py | 94 + cinder/api/openstack/extensions.py | 395 + cinder/api/openstack/urlmap.py | 297 + cinder/api/openstack/volume/__init__.py | 62 + .../api/openstack/volume/contrib/__init__.py | 39 + .../volume/contrib/types_extra_specs.py | 152 + .../openstack/volume/contrib/types_manage.py | 91 + cinder/api/openstack/volume/extensions.py | 33 + cinder/api/openstack/volume/snapshots.py | 170 + cinder/api/openstack/volume/types.py | 76 + cinder/api/openstack/volume/versions.py | 83 + cinder/api/openstack/volume/views/__init__.py | 16 + cinder/api/openstack/volume/views/versions.py | 36 + cinder/api/openstack/volume/volumes.py | 263 + cinder/api/openstack/wsgi.py | 1123 ++ cinder/api/openstack/xmlutil.py | 908 + cinder/api/sizelimit.py | 54 + cinder/common/__init__.py | 15 + cinder/common/memorycache.py | 64 + cinder/common/policy.py | 222 + cinder/compat/__init__.py | 15 + cinder/compat/flagfile.py | 188 + cinder/compute/__init__.py | 0 cinder/compute/aggregate_states.py | 44 + cinder/context.py | 138 + cinder/db/__init__.py | 23 + cinder/db/api.py | 1335 ++ cinder/db/base.py | 40 + cinder/db/migration.py | 35 + cinder/db/sqlalchemy/__init__.py | 17 + cinder/db/sqlalchemy/api.py | 1499 ++ cinder/db/sqlalchemy/migrate_repo/README | 4 + cinder/db/sqlalchemy/migrate_repo/__init__.py | 0 cinder/db/sqlalchemy/migrate_repo/manage.py | 4 + cinder/db/sqlalchemy/migrate_repo/migrate.cfg | 20 + .../migrate_repo/versions/001_austin.py | 627 + .../migrate_repo/versions/002_bexar.py | 236 + .../versions/002_postgresql_downgrade.sql | 20 + .../versions/002_sqlite_downgrade.sql | 388 + .../versions/003_add_label_to_networks.py | 42 + .../versions/003_sqlite_downgrade.sql | 111 + .../versions/004_add_zone_tables.py | 66 + .../versions/005_add_instance_metadata.py | 81 + .../006_add_provider_data_to_volumes.py | 54 + .../versions/006_sqlite_downgrade.sql | 113 + .../versions/007_add_ipv6_to_fixed_ips.py | 70 + .../versions/007_sqlite_downgrade.sql | 79 + .../versions/008_add_instance_types.py | 85 + .../versions/009_add_instance_migrations.py | 70 + .../versions/010_add_os_type_to_instances.py | 45 + .../versions/011_live_migration.py | 85 + .../versions/012_add_ipv6_flatmanager.py | 90 + .../versions/012_sqlite_upgrade.sql | 195 + .../versions/013_add_flavors_to_migrations.py | 43 + .../versions/013_sqlite_downgrade.sql | 69 + .../014_add_instance_type_id_to_instances.py | 74 + .../015_add_auto_assign_to_floating_ips.py | 35 + 
.../versions/015_sqlite_downgrade.sql | 62 + .../versions/016_make_quotas_key_and_value.py | 213 + .../017_make_instance_type_id_an_integer.py | 87 + .../018_rename_server_management_url.py | 35 + .../019_add_volume_snapshot_support.py | 82 + .../020_add_snapshot_id_to_volumes.py | 40 + .../versions/020_sqlite_downgrade.sql | 119 + .../versions/021_rename_image_ids.py | 38 + .../versions/022_set_engine_mysql_innodb.py | 64 + .../versions/023_add_vm_mode_to_instances.py | 42 + .../versions/024_add_block_device_mapping.py | 92 + .../versions/025_add_uuid_to_instances.py | 45 + .../versions/026_add_agent_table.py | 89 + .../027_add_provider_firewall_rules.py | 65 + .../028_add_instance_type_extra_specs.py | 76 + .../versions/029_add_zone_weight_offsets.py | 41 + .../migrate_repo/versions/030_multi_nic.py | 146 + .../versions/030_sqlite_downgrade.sql | 377 + .../031_fk_fixed_ips_virtual_interface_id.py | 59 + .../versions/031_sqlite_downgrade.sql | 48 + .../versions/031_sqlite_upgrade.sql | 48 + .../versions/032_add_root_device_name.py | 42 + .../migrate_repo/versions/033_ha_network.py | 42 + .../versions/033_sqlite_downgrade.sql | 193 + .../034_change_instance_id_in_migrations.py | 46 + .../versions/035_secondary_dns.py | 39 + .../036_change_flavor_id_in_migrations.py | 79 + .../versions/037_instances_drop_admin_pass.py | 42 + .../038_add_uuid_to_virtual_interfaces.py | 45 + .../versions/038_sqlite_downgrade.sql | 63 + .../versions/039_add_instances_accessip.py | 49 + .../versions/040_add_uuid_to_networks.py | 45 + .../041_add_config_drive_to_instances.py | 36 + .../042_add_volume_types_and_extradata.py | 122 + .../versions/042_sqlite_downgrade.sql | 129 + .../migrate_repo/versions/043_add_vsa_data.py | 84 + .../versions/044_update_instance_states.py | 52 + .../versions/045_add_network_priority.py | 44 + .../versions/046_add_instance_swap.py | 49 + .../047_remove_instances_fk_from_vif.py | 61 + .../versions/047_sqlite_downgrade.sql | 46 + .../versions/047_sqlite_upgrade.sql | 45 + .../versions/048_add_zone_name.py | 33 + .../versions/049_add_instances_progress.py | 44 + .../050_add_disk_config_to_instances.py | 37 + .../versions/050_sqlite_downgrade.sql | 207 + .../051_add_vcpu_weight_to_instance_types.py | 34 + .../versions/052_kill_export_devices.py | 65 + ...connection_info_to_block_device_mapping.py | 38 + .../versions/053_sqlite_downgrade.sql | 87 + .../versions/054_add_bw_usage_data_cache.py | 64 + .../versions/055_convert_flavor_id_to_str.py | 112 + .../versions/056_add_s3_images.py | 60 + .../versions/057_add_sm_driver_tables.py | 113 + .../versions/058_rename_managed_disk.py | 37 + .../059_split_rxtx_quota_into_network.py | 61 + .../versions/059_sqlite_downgrade.sql | 137 + .../versions/059_sqlite_upgrade.sql | 87 + .../060_remove_network_fk_from_vif.py | 62 + .../versions/060_sqlite_downgrade.sql | 45 + .../versions/060_sqlite_upgrade.sql | 44 + .../061_add_index_to_instance_uuid.py | 29 + .../062_add_instance_info_cache_table.py | 70 + .../versions/063_add_instance_faults_table.py | 60 + ...instance_id_to_uuid_in_instance_actions.py | 80 + .../065_add_index_to_instance_project_id.py | 31 + .../066_preload_instance_info_cache_table.py | 31 + ...7_add_pool_and_interface_to_floating_ip.py | 41 + .../versions/067_sqlite_downgrade.sql | 69 + .../versions/068_add_instance_attribute.py | 36 + .../versions/068_sqlite_downgrade.sql | 219 + .../versions/069_block_migration.py | 50 + .../versions/070_sqlite_downgrade.sql | 103 + .../versions/070_sqlite_upgrade.sql | 99 + 
.../versions/070_untie_nova_network_models.py | 100 + .../versions/071_add_host_aggregate_tables.py | 108 + .../versions/072_add_dns_table.py | 77 + .../versions/072_mysql_upgrade.sql | 13 + .../migrate_repo/versions/073_add_capacity.py | 49 + .../versions/074_change_flavor_local_gb.py | 130 + .../versions/074_sqlite_upgrade.sql | 313 + ...75_convert_bw_usage_to_store_network_id.py | 97 + .../versions/076_remove_unique_constraints.py | 84 + .../versions/076_sqlite_upgrade.sql | 61 + .../versions/077_convert_to_utf8.py | 61 + .../versions/078_add_rpc_info_to_zones.py | 46 + .../versions/078_sqlite_downgrade.sql | 35 + .../079_add_zone_name_to_instances.py | 30 + ...dd_hypervisor_hostname_to_compute_nodes.py | 30 + .../versions/081_drop_instance_id_bw_cache.py | 69 + .../migrate_repo/versions/082_zone_to_cell.py | 35 + .../migrate_repo/versions/083_quota_class.py | 61 + .../versions/084_quotas_unlimited.py | 43 + .../085_add_index_to_fixed_ips_by_address.py | 31 + .../versions/086_set_engine_mysql_innodb.py | 44 + .../087_add_uuid_to_bw_usage_cache.py | 56 + ...ance_id_to_uuid_in_block_device_mapping.py | 81 + .../versions/088_sqlite_downgrade.sql | 97 + .../versions/088_sqlite_upgrade.sql | 97 + .../versions/089_add_volume_id_mappings.py | 116 + .../versions/090_modify_volume_id_datatype.py | 239 + .../versions/090_sqlite_downgrade.sql | 226 + .../versions/090_sqlite_upgrade.sql | 226 + .../091_convert_volume_ids_to_uuid.py | 145 + .../migrate_repo/versions/__init__.py | 0 cinder/db/sqlalchemy/migration.py | 129 + cinder/db/sqlalchemy/models.py | 1063 + cinder/db/sqlalchemy/session.py | 156 + cinder/exception.py | 938 + cinder/flags.py | 356 + cinder/locale/bs/LC_MESSAGES/nova.po | 8201 ++++++++ cinder/locale/cs/LC_MESSAGES/nova.po | 8251 ++++++++ cinder/locale/da/LC_MESSAGES/nova.po | 8203 ++++++++ cinder/locale/de/LC_MESSAGES/nova.po | 8208 ++++++++ cinder/locale/en_AU/LC_MESSAGES/nova.po | 8209 ++++++++ cinder/locale/en_GB/LC_MESSAGES/nova.po | 8209 ++++++++ cinder/locale/es/LC_MESSAGES/nova.po | 8220 ++++++++ cinder/locale/fr/LC_MESSAGES/nova.po | 8251 ++++++++ cinder/locale/it/LC_MESSAGES/nova.po | 8210 ++++++++ cinder/locale/ja/LC_MESSAGES/nova.po | 8196 ++++++++ cinder/locale/ko/LC_MESSAGES/nova.po | 8207 ++++++++ cinder/locale/nova.pot | 7463 +++++++ cinder/locale/pt_BR/LC_MESSAGES/nova.po | 8208 ++++++++ cinder/locale/ru/LC_MESSAGES/nova.po | 8304 ++++++++ cinder/locale/tl/LC_MESSAGES/nova.po | 8200 ++++++++ cinder/locale/tr/LC_MESSAGES/nova.po | 8202 ++++++++ cinder/locale/uk/LC_MESSAGES/nova.po | 8199 ++++++++ cinder/locale/zh_CN/LC_MESSAGES/nova.po | 8064 ++++++++ cinder/locale/zh_TW/LC_MESSAGES/nova.po | 8207 ++++++++ cinder/log.py | 416 + cinder/manager.py | 205 + cinder/notifier/__init__.py | 14 + cinder/notifier/api.py | 133 + cinder/notifier/capacity_notifier.py | 81 + cinder/notifier/list_notifier.py | 71 + cinder/notifier/log_notifier.py | 34 + cinder/notifier/no_op_notifier.py | 19 + cinder/notifier/rabbit_notifier.py | 46 + cinder/notifier/test_notifier.py | 25 + cinder/openstack/__init__.py | 15 + cinder/openstack/common/README | 13 + cinder/openstack/common/__init__.py | 15 + cinder/openstack/common/cfg.py | 1298 ++ cinder/openstack/common/exception.py | 147 + cinder/openstack/common/importutils.py | 45 + cinder/openstack/common/iniparser.py | 126 + cinder/openstack/common/local.py | 37 + cinder/policy.py | 90 + cinder/quota.py | 234 + cinder/rootwrap/__init__.py | 16 + cinder/rootwrap/filters.py | 147 + cinder/rootwrap/volume.py | 45 + cinder/rootwrap/wrapper.py | 60 
+ cinder/rpc/__init__.py | 227 + cinder/rpc/amqp.py | 405 + cinder/rpc/common.py | 220 + cinder/rpc/impl_fake.py | 185 + cinder/rpc/impl_kombu.py | 713 + cinder/rpc/impl_qpid.py | 563 + cinder/scheduler/__init__.py | 27 + cinder/scheduler/api.py | 72 + cinder/scheduler/chance.py | 83 + cinder/scheduler/driver.py | 164 + cinder/scheduler/host_manager.py | 36 + cinder/scheduler/manager.py | 204 + cinder/scheduler/simple.py | 144 + cinder/service.py | 429 + cinder/test.py | 295 + cinder/testing/README.rst | 66 + cinder/testing/__init__.py | 0 cinder/testing/fake/__init__.py | 1 + cinder/testing/runner.py | 372 + cinder/tests/__init__.py | 84 + cinder/tests/api/__init__.py | 19 + cinder/tests/api/openstack/__init__.py | 19 + cinder/tests/api/openstack/common.py | 58 + cinder/tests/api/openstack/fakes.py | 234 + cinder/tests/api/openstack/test_common.py | 526 + cinder/tests/api/openstack/test_faults.py | 208 + cinder/tests/api/openstack/test_wsgi.py | 833 + cinder/tests/api/openstack/test_xmlutil.py | 722 + cinder/tests/api/openstack/volume/__init__.py | 19 + .../api/openstack/volume/contrib/__init__.py | 19 + .../volume/contrib/test_types_extra_specs.py | 202 + .../volume/contrib/test_types_manage.py | 103 + .../openstack/volume/extensions/__init__.py | 15 + .../openstack/volume/extensions/foxinsocks.py | 94 + .../api/openstack/volume/test_extensions.py | 156 + .../tests/api/openstack/volume/test_router.py | 105 + .../api/openstack/volume/test_snapshots.py | 214 + .../tests/api/openstack/volume/test_types.py | 146 + .../api/openstack/volume/test_volumes.py | 290 + cinder/tests/api/test_auth.py | 58 + cinder/tests/api/test_sizelimit.py | 51 + cinder/tests/api/test_wsgi.py | 67 + cinder/tests/db/__init__.py | 20 + cinder/tests/db/fakes.py | 47 + cinder/tests/declare_flags.py | 23 + cinder/tests/fake_flags.py | 34 + cinder/tests/fake_utils.py | 112 + cinder/tests/integrated/__init__.py | 22 + cinder/tests/integrated/api/__init__.py | 20 + cinder/tests/integrated/api/client.py | 217 + cinder/tests/integrated/integrated_helpers.py | 130 + cinder/tests/integrated/test_extensions.py | 42 + cinder/tests/integrated/test_login.py | 31 + cinder/tests/integrated/test_volumes.py | 181 + cinder/tests/integrated/test_xml.py | 52 + cinder/tests/monkey_patch_example/__init__.py | 33 + .../tests/monkey_patch_example/example_a.py | 29 + .../tests/monkey_patch_example/example_b.py | 30 + cinder/tests/notifier/__init__.py | 16 + .../tests/notifier/test_capacity_notifier.py | 59 + cinder/tests/notifier/test_list_notifier.py | 84 + cinder/tests/policy.json | 25 + cinder/tests/rpc/__init__.py | 19 + cinder/tests/rpc/common.py | 239 + cinder/tests/rpc/test_common.py | 147 + cinder/tests/rpc/test_fake.py | 33 + cinder/tests/rpc/test_kombu.py | 350 + cinder/tests/rpc/test_kombu_ssl.py | 58 + cinder/tests/rpc/test_qpid.py | 340 + cinder/tests/runtime_flags.py | 23 + cinder/tests/scheduler/__init__.py | 19 + cinder/tests/scheduler/fakes.py | 62 + cinder/tests/scheduler/test_scheduler.py | 322 + cinder/tests/test_SolidFireSanISCSIDriver.py | 186 + cinder/tests/test_api.py | 75 + cinder/tests/test_compat_flagfile.py | 175 + cinder/tests/test_context.py | 70 + cinder/tests/test_db_api.py | 331 + cinder/tests/test_exception.py | 126 + cinder/tests/test_flags.py | 146 + cinder/tests/test_iscsi.py | 116 + cinder/tests/test_log.py | 218 + cinder/tests/test_migrations.conf | 9 + cinder/tests/test_migrations.py | 296 + cinder/tests/test_misc.py | 184 + cinder/tests/test_netapp.py | 927 + cinder/tests/test_nexenta.py | 281 + 
cinder/tests/test_notifier.py | 133 + cinder/tests/test_nova_rootwrap.py | 133 + cinder/tests/test_policy.py | 189 + cinder/tests/test_quota.py | 316 + cinder/tests/test_service.py | 221 + cinder/tests/test_skip_examples.py | 47 + cinder/tests/test_test.py | 44 + cinder/tests/test_test_utils.py | 29 + cinder/tests/test_utils.py | 1188 ++ cinder/tests/test_versions.py | 59 + cinder/tests/test_volume.py | 501 + cinder/tests/test_volume_types.py | 167 + cinder/tests/test_volume_types_extra_specs.py | 130 + cinder/tests/test_wsgi.py | 92 + cinder/tests/utils.py | 25 + cinder/utils.py | 1678 ++ cinder/version.py | 38 + cinder/volume/__init__.py | 25 + cinder/volume/api.py | 371 + cinder/volume/driver.py | 709 + cinder/volume/iscsi.py | 160 + cinder/volume/manager.py | 331 + cinder/volume/netapp.py | 676 + cinder/volume/nexenta/__init__.py | 33 + cinder/volume/nexenta/jsonrpc.py | 84 + cinder/volume/nexenta/volume.py | 282 + cinder/volume/san.py | 897 + cinder/volume/volume_types.py | 125 + cinder/volume/xensm.py | 237 + cinder/wsgi.py | 374 + contrib/openstack-config | 65 + contrib/redhat-eventlet.patch | 16 + doc/.gitignore | 3 + doc/Makefile | 97 + doc/README.rst | 55 + doc/ext/__init__.py | 0 doc/ext/nova_autodoc.py | 12 + doc/ext/nova_todo.py | 101 + doc/find_autodoc_modules.sh | 20 + doc/generate_autodoc_index.sh | 46 + doc/source/_ga/layout.html | 17 + doc/source/_static/.gitignore | 0 doc/source/_static/.placeholder | 0 doc/source/_static/basic.css | 416 + doc/source/_static/default.css | 230 + doc/source/_static/jquery.tweet.js | 154 + doc/source/_static/tweaks.css | 218 + doc/source/_templates/.gitignore | 0 doc/source/_templates/.placeholder | 0 doc/source/_theme/layout.html | 95 + doc/source/_theme/theme.conf | 5 + doc/source/conf.py | 234 + doc/source/devref/addmethod.openstackapi.rst | 56 + doc/source/devref/aggregates.rst | 65 + doc/source/devref/api.rst | 270 + doc/source/devref/architecture.rst | 52 + doc/source/devref/auth.rst | 276 + doc/source/devref/cloudpipe.rst | 166 + doc/source/devref/database.rst | 63 + doc/source/devref/development.environment.rst | 152 + doc/source/devref/down.sh | 7 + doc/source/devref/fakes.rst | 85 + doc/source/devref/filter_scheduler.rst | 258 + doc/source/devref/gerrit.rst | 16 + doc/source/devref/glance.rst | 28 + doc/source/devref/il8n.rst | 34 + doc/source/devref/index.rst | 86 + doc/source/devref/interfaces | 17 + doc/source/devref/jenkins.rst | 41 + doc/source/devref/launchpad.rst | 54 + doc/source/devref/multinic.rst | 39 + doc/source/devref/network.rst | 128 + doc/source/devref/nova.rst | 215 + doc/source/devref/rc.local | 36 + doc/source/devref/rpc.rst | 151 + doc/source/devref/scheduler.rst | 71 + doc/source/devref/server.conf.template | 34 + doc/source/devref/services.rst | 55 + doc/source/devref/threading.rst | 51 + doc/source/devref/unit_tests.rst | 159 + doc/source/devref/up.sh | 7 + doc/source/devref/volume.rst | 66 + doc/source/devref/xensmvolume.rst | 88 + doc/source/image_src/multinic_1.odg | Bin 0 -> 12363 bytes doc/source/image_src/multinic_2.odg | Bin 0 -> 13425 bytes doc/source/image_src/multinic_3.odg | Bin 0 -> 13598 bytes doc/source/images/NOVA_ARCH.png | Bin 0 -> 191332 bytes doc/source/images/NOVA_ARCH.svg | 5854 ++++++ doc/source/images/NOVA_ARCH_200dpi.png | Bin 0 -> 439024 bytes doc/source/images/NOVA_ARCH_66dpi.png | Bin 0 -> 110890 bytes doc/source/images/NOVA_clouds_A_B.png | Bin 0 -> 77007 bytes doc/source/images/NOVA_clouds_A_B.svg | 16342 ++++++++++++++++ doc/source/images/NOVA_clouds_C1_C2.svg | 9763 +++++++++ 
doc/source/images/NOVA_clouds_C1_C2.svg.png | Bin 0 -> 448574 bytes doc/source/images/Novadiagram.png | Bin 0 -> 52609 bytes doc/source/images/base_scheduler.png | Bin 0 -> 17068 bytes doc/source/images/cloudpipe.png | Bin 0 -> 89812 bytes doc/source/images/fabric.png | Bin 0 -> 125915 bytes doc/source/images/filteringWorkflow1.png | Bin 0 -> 66997 bytes doc/source/images/filteringWorkflow2.png | Bin 0 -> 75288 bytes doc/source/images/multinic_dhcp.png | Bin 0 -> 54531 bytes doc/source/images/multinic_flat.png | Bin 0 -> 40871 bytes doc/source/images/multinic_vlan.png | Bin 0 -> 58552 bytes doc/source/images/nova.compute.api.create.png | Bin 0 -> 50171 bytes doc/source/images/novascreens.png | Bin 0 -> 27949 bytes doc/source/images/novashvirtually.png | Bin 0 -> 39000 bytes doc/source/images/rpc/arch.png | Bin 0 -> 26690 bytes doc/source/images/rpc/arch.svg | 292 + doc/source/images/rpc/flow1.png | Bin 0 -> 40982 bytes doc/source/images/rpc/flow1.svg | 617 + doc/source/images/rpc/flow2.png | Bin 0 -> 30650 bytes doc/source/images/rpc/flow2.svg | 423 + doc/source/images/rpc/rabt.png | Bin 0 -> 44964 bytes doc/source/images/rpc/rabt.svg | 581 + doc/source/images/rpc/state.png | Bin 0 -> 38543 bytes doc/source/images/vmwareapi_blockdiagram.jpg | Bin 0 -> 75363 bytes doc/source/images/zone_aware_overview.png | Bin 0 -> 56142 bytes doc/source/images/zone_aware_scheduler.png | Bin 0 -> 20902 bytes doc/source/images/zone_overview.png | Bin 0 -> 51587 bytes doc/source/index.rst | 67 + doc/source/man/nova-manage.rst | 281 + etc/cinder/api-paste.ini | 51 + etc/cinder/cinder.conf.sample | 673 + etc/cinder/logging_sample.conf | 76 + etc/cinder/policy.json | 15 + openstack-common.conf | 7 + pylintrc | 38 + run_tests.sh | 176 + setup.cfg | 32 + setup.py | 73 + tools/clean-vlans | 25 + tools/clean_file_locks.py | 63 + tools/conf/create_conf.py | 159 + tools/conf/generate_sample.sh | 25 + tools/enable-pre-commit-hook.sh | 42 + tools/hacking.py | 391 + tools/install_venv.py | 248 + tools/pip-requires | 22 + tools/rfc.sh | 150 + tools/test-requires | 11 + tools/with_venv.sh | 4 + tox.ini | 38 + 468 files changed, 247534 insertions(+) create mode 100644 .gitignore create mode 100644 .gitreview create mode 100644 .mailmap create mode 100644 Authors create mode 100644 HACKING.rst create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.rst create mode 100644 babel.cfg create mode 100755 bin/cinder-all create mode 100755 bin/cinder-api create mode 100755 bin/cinder-manage create mode 100755 bin/cinder-rootwrap create mode 100755 bin/cinder-scheduler create mode 100755 bin/cinder-volume create mode 100755 bin/clear_rabbit_queues create mode 100644 cinder/__init__.py create mode 100644 cinder/api/__init__.py create mode 100644 cinder/api/auth.py create mode 100644 cinder/api/openstack/__init__.py create mode 100644 cinder/api/openstack/auth.py create mode 100644 cinder/api/openstack/common.py create mode 100644 cinder/api/openstack/compute/__init__.py create mode 100644 cinder/api/openstack/compute/schemas/atom-link.rng create mode 100644 cinder/api/openstack/compute/schemas/v1.1/extension.rng create mode 100644 cinder/api/openstack/compute/schemas/v1.1/extensions.rng create mode 100644 cinder/api/openstack/compute/schemas/v1.1/metadata.rng create mode 100644 cinder/api/openstack/compute/versions.py create mode 100644 cinder/api/openstack/compute/views/__init__.py create mode 100644 cinder/api/openstack/compute/views/versions.py create mode 100644 cinder/api/openstack/extensions.py 
create mode 100644 cinder/api/openstack/urlmap.py create mode 100644 cinder/api/openstack/volume/__init__.py create mode 100644 cinder/api/openstack/volume/contrib/__init__.py create mode 100644 cinder/api/openstack/volume/contrib/types_extra_specs.py create mode 100644 cinder/api/openstack/volume/contrib/types_manage.py create mode 100644 cinder/api/openstack/volume/extensions.py create mode 100644 cinder/api/openstack/volume/snapshots.py create mode 100644 cinder/api/openstack/volume/types.py create mode 100644 cinder/api/openstack/volume/versions.py create mode 100644 cinder/api/openstack/volume/views/__init__.py create mode 100644 cinder/api/openstack/volume/views/versions.py create mode 100644 cinder/api/openstack/volume/volumes.py create mode 100644 cinder/api/openstack/wsgi.py create mode 100644 cinder/api/openstack/xmlutil.py create mode 100644 cinder/api/sizelimit.py create mode 100644 cinder/common/__init__.py create mode 100644 cinder/common/memorycache.py create mode 100644 cinder/common/policy.py create mode 100644 cinder/compat/__init__.py create mode 100644 cinder/compat/flagfile.py create mode 100644 cinder/compute/__init__.py create mode 100644 cinder/compute/aggregate_states.py create mode 100644 cinder/context.py create mode 100644 cinder/db/__init__.py create mode 100644 cinder/db/api.py create mode 100644 cinder/db/base.py create mode 100644 cinder/db/migration.py create mode 100644 cinder/db/sqlalchemy/__init__.py create mode 100644 cinder/db/sqlalchemy/api.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/README create mode 100644 cinder/db/sqlalchemy/migrate_repo/__init__.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/manage.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/migrate.cfg create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/001_austin.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/002_bexar.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/011_live_migration.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql create mode 100644 
cinder/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/033_ha_network.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py create mode 100644 
cinder/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_upgrade.sql create mode 100644 
cinder/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/074_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/076_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/080_add_hypervisor_hostname_to_compute_nodes.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/082_zone_to_cell.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py create mode 100644 cinder/db/sqlalchemy/migrate_repo/versions/__init__.py create mode 100644 cinder/db/sqlalchemy/migration.py create mode 100644 cinder/db/sqlalchemy/models.py create mode 100644 cinder/db/sqlalchemy/session.py create mode 100644 cinder/exception.py create mode 100644 cinder/flags.py create mode 100644 cinder/locale/bs/LC_MESSAGES/nova.po create mode 100644 cinder/locale/cs/LC_MESSAGES/nova.po create mode 100644 cinder/locale/da/LC_MESSAGES/nova.po create mode 100644 cinder/locale/de/LC_MESSAGES/nova.po create mode 100644 cinder/locale/en_AU/LC_MESSAGES/nova.po create mode 100644 cinder/locale/en_GB/LC_MESSAGES/nova.po create mode 100644 cinder/locale/es/LC_MESSAGES/nova.po create mode 100644 cinder/locale/fr/LC_MESSAGES/nova.po create mode 100644 cinder/locale/it/LC_MESSAGES/nova.po create 
mode 100644 cinder/locale/ja/LC_MESSAGES/nova.po create mode 100644 cinder/locale/ko/LC_MESSAGES/nova.po create mode 100644 cinder/locale/nova.pot create mode 100644 cinder/locale/pt_BR/LC_MESSAGES/nova.po create mode 100644 cinder/locale/ru/LC_MESSAGES/nova.po create mode 100644 cinder/locale/tl/LC_MESSAGES/nova.po create mode 100644 cinder/locale/tr/LC_MESSAGES/nova.po create mode 100644 cinder/locale/uk/LC_MESSAGES/nova.po create mode 100644 cinder/locale/zh_CN/LC_MESSAGES/nova.po create mode 100644 cinder/locale/zh_TW/LC_MESSAGES/nova.po create mode 100644 cinder/log.py create mode 100644 cinder/manager.py create mode 100644 cinder/notifier/__init__.py create mode 100644 cinder/notifier/api.py create mode 100644 cinder/notifier/capacity_notifier.py create mode 100644 cinder/notifier/list_notifier.py create mode 100644 cinder/notifier/log_notifier.py create mode 100644 cinder/notifier/no_op_notifier.py create mode 100644 cinder/notifier/rabbit_notifier.py create mode 100644 cinder/notifier/test_notifier.py create mode 100644 cinder/openstack/__init__.py create mode 100644 cinder/openstack/common/README create mode 100644 cinder/openstack/common/__init__.py create mode 100644 cinder/openstack/common/cfg.py create mode 100644 cinder/openstack/common/exception.py create mode 100644 cinder/openstack/common/importutils.py create mode 100644 cinder/openstack/common/iniparser.py create mode 100644 cinder/openstack/common/local.py create mode 100644 cinder/policy.py create mode 100644 cinder/quota.py create mode 100755 cinder/rootwrap/__init__.py create mode 100755 cinder/rootwrap/filters.py create mode 100755 cinder/rootwrap/volume.py create mode 100755 cinder/rootwrap/wrapper.py create mode 100644 cinder/rpc/__init__.py create mode 100644 cinder/rpc/amqp.py create mode 100644 cinder/rpc/common.py create mode 100644 cinder/rpc/impl_fake.py create mode 100644 cinder/rpc/impl_kombu.py create mode 100644 cinder/rpc/impl_qpid.py create mode 100644 cinder/scheduler/__init__.py create mode 100644 cinder/scheduler/api.py create mode 100644 cinder/scheduler/chance.py create mode 100644 cinder/scheduler/driver.py create mode 100644 cinder/scheduler/host_manager.py create mode 100644 cinder/scheduler/manager.py create mode 100644 cinder/scheduler/simple.py create mode 100644 cinder/service.py create mode 100644 cinder/test.py create mode 100644 cinder/testing/README.rst create mode 100644 cinder/testing/__init__.py create mode 100644 cinder/testing/fake/__init__.py create mode 100644 cinder/testing/runner.py create mode 100644 cinder/tests/__init__.py create mode 100644 cinder/tests/api/__init__.py create mode 100644 cinder/tests/api/openstack/__init__.py create mode 100644 cinder/tests/api/openstack/common.py create mode 100644 cinder/tests/api/openstack/fakes.py create mode 100644 cinder/tests/api/openstack/test_common.py create mode 100644 cinder/tests/api/openstack/test_faults.py create mode 100644 cinder/tests/api/openstack/test_wsgi.py create mode 100644 cinder/tests/api/openstack/test_xmlutil.py create mode 100644 cinder/tests/api/openstack/volume/__init__.py create mode 100644 cinder/tests/api/openstack/volume/contrib/__init__.py create mode 100644 cinder/tests/api/openstack/volume/contrib/test_types_extra_specs.py create mode 100644 cinder/tests/api/openstack/volume/contrib/test_types_manage.py create mode 100644 cinder/tests/api/openstack/volume/extensions/__init__.py create mode 100644 cinder/tests/api/openstack/volume/extensions/foxinsocks.py create mode 100644 
cinder/tests/api/openstack/volume/test_extensions.py create mode 100644 cinder/tests/api/openstack/volume/test_router.py create mode 100644 cinder/tests/api/openstack/volume/test_snapshots.py create mode 100644 cinder/tests/api/openstack/volume/test_types.py create mode 100644 cinder/tests/api/openstack/volume/test_volumes.py create mode 100644 cinder/tests/api/test_auth.py create mode 100644 cinder/tests/api/test_sizelimit.py create mode 100644 cinder/tests/api/test_wsgi.py create mode 100644 cinder/tests/db/__init__.py create mode 100644 cinder/tests/db/fakes.py create mode 100644 cinder/tests/declare_flags.py create mode 100644 cinder/tests/fake_flags.py create mode 100644 cinder/tests/fake_utils.py create mode 100644 cinder/tests/integrated/__init__.py create mode 100644 cinder/tests/integrated/api/__init__.py create mode 100644 cinder/tests/integrated/api/client.py create mode 100644 cinder/tests/integrated/integrated_helpers.py create mode 100644 cinder/tests/integrated/test_extensions.py create mode 100644 cinder/tests/integrated/test_login.py create mode 100644 cinder/tests/integrated/test_volumes.py create mode 100644 cinder/tests/integrated/test_xml.py create mode 100644 cinder/tests/monkey_patch_example/__init__.py create mode 100644 cinder/tests/monkey_patch_example/example_a.py create mode 100644 cinder/tests/monkey_patch_example/example_b.py create mode 100644 cinder/tests/notifier/__init__.py create mode 100644 cinder/tests/notifier/test_capacity_notifier.py create mode 100644 cinder/tests/notifier/test_list_notifier.py create mode 100644 cinder/tests/policy.json create mode 100644 cinder/tests/rpc/__init__.py create mode 100644 cinder/tests/rpc/common.py create mode 100644 cinder/tests/rpc/test_common.py create mode 100644 cinder/tests/rpc/test_fake.py create mode 100644 cinder/tests/rpc/test_kombu.py create mode 100644 cinder/tests/rpc/test_kombu_ssl.py create mode 100644 cinder/tests/rpc/test_qpid.py create mode 100644 cinder/tests/runtime_flags.py create mode 100644 cinder/tests/scheduler/__init__.py create mode 100644 cinder/tests/scheduler/fakes.py create mode 100644 cinder/tests/scheduler/test_scheduler.py create mode 100644 cinder/tests/test_SolidFireSanISCSIDriver.py create mode 100644 cinder/tests/test_api.py create mode 100644 cinder/tests/test_compat_flagfile.py create mode 100644 cinder/tests/test_context.py create mode 100644 cinder/tests/test_db_api.py create mode 100644 cinder/tests/test_exception.py create mode 100644 cinder/tests/test_flags.py create mode 100644 cinder/tests/test_iscsi.py create mode 100644 cinder/tests/test_log.py create mode 100644 cinder/tests/test_migrations.conf create mode 100644 cinder/tests/test_migrations.py create mode 100644 cinder/tests/test_misc.py create mode 100644 cinder/tests/test_netapp.py create mode 100644 cinder/tests/test_nexenta.py create mode 100644 cinder/tests/test_notifier.py create mode 100644 cinder/tests/test_nova_rootwrap.py create mode 100644 cinder/tests/test_policy.py create mode 100644 cinder/tests/test_quota.py create mode 100644 cinder/tests/test_service.py create mode 100644 cinder/tests/test_skip_examples.py create mode 100644 cinder/tests/test_test.py create mode 100644 cinder/tests/test_test_utils.py create mode 100644 cinder/tests/test_utils.py create mode 100644 cinder/tests/test_versions.py create mode 100644 cinder/tests/test_volume.py create mode 100644 cinder/tests/test_volume_types.py create mode 100644 cinder/tests/test_volume_types_extra_specs.py create mode 100644 cinder/tests/test_wsgi.py 
create mode 100644 cinder/tests/utils.py create mode 100644 cinder/utils.py create mode 100644 cinder/version.py create mode 100644 cinder/volume/__init__.py create mode 100644 cinder/volume/api.py create mode 100644 cinder/volume/driver.py create mode 100644 cinder/volume/iscsi.py create mode 100644 cinder/volume/manager.py create mode 100644 cinder/volume/netapp.py create mode 100644 cinder/volume/nexenta/__init__.py create mode 100644 cinder/volume/nexenta/jsonrpc.py create mode 100644 cinder/volume/nexenta/volume.py create mode 100644 cinder/volume/san.py create mode 100644 cinder/volume/volume_types.py create mode 100644 cinder/volume/xensm.py create mode 100644 cinder/wsgi.py create mode 100755 contrib/openstack-config create mode 100644 contrib/redhat-eventlet.patch create mode 100644 doc/.gitignore create mode 100644 doc/Makefile create mode 100644 doc/README.rst create mode 100644 doc/ext/__init__.py create mode 100644 doc/ext/nova_autodoc.py create mode 100644 doc/ext/nova_todo.py create mode 100755 doc/find_autodoc_modules.sh create mode 100755 doc/generate_autodoc_index.sh create mode 100644 doc/source/_ga/layout.html create mode 100644 doc/source/_static/.gitignore create mode 100644 doc/source/_static/.placeholder create mode 100644 doc/source/_static/basic.css create mode 100644 doc/source/_static/default.css create mode 100644 doc/source/_static/jquery.tweet.js create mode 100644 doc/source/_static/tweaks.css create mode 100644 doc/source/_templates/.gitignore create mode 100644 doc/source/_templates/.placeholder create mode 100644 doc/source/_theme/layout.html create mode 100644 doc/source/_theme/theme.conf create mode 100644 doc/source/conf.py create mode 100644 doc/source/devref/addmethod.openstackapi.rst create mode 100644 doc/source/devref/aggregates.rst create mode 100644 doc/source/devref/api.rst create mode 100644 doc/source/devref/architecture.rst create mode 100644 doc/source/devref/auth.rst create mode 100644 doc/source/devref/cloudpipe.rst create mode 100644 doc/source/devref/database.rst create mode 100644 doc/source/devref/development.environment.rst create mode 100644 doc/source/devref/down.sh create mode 100644 doc/source/devref/fakes.rst create mode 100644 doc/source/devref/filter_scheduler.rst create mode 100644 doc/source/devref/gerrit.rst create mode 100644 doc/source/devref/glance.rst create mode 100644 doc/source/devref/il8n.rst create mode 100644 doc/source/devref/index.rst create mode 100644 doc/source/devref/interfaces create mode 100644 doc/source/devref/jenkins.rst create mode 100644 doc/source/devref/launchpad.rst create mode 100644 doc/source/devref/multinic.rst create mode 100644 doc/source/devref/network.rst create mode 100644 doc/source/devref/nova.rst create mode 100644 doc/source/devref/rc.local create mode 100644 doc/source/devref/rpc.rst create mode 100644 doc/source/devref/scheduler.rst create mode 100644 doc/source/devref/server.conf.template create mode 100644 doc/source/devref/services.rst create mode 100644 doc/source/devref/threading.rst create mode 100644 doc/source/devref/unit_tests.rst create mode 100644 doc/source/devref/up.sh create mode 100644 doc/source/devref/volume.rst create mode 100644 doc/source/devref/xensmvolume.rst create mode 100644 doc/source/image_src/multinic_1.odg create mode 100644 doc/source/image_src/multinic_2.odg create mode 100644 doc/source/image_src/multinic_3.odg create mode 100644 doc/source/images/NOVA_ARCH.png create mode 100644 doc/source/images/NOVA_ARCH.svg create mode 100644 
doc/source/images/NOVA_ARCH_200dpi.png create mode 100644 doc/source/images/NOVA_ARCH_66dpi.png create mode 100644 doc/source/images/NOVA_clouds_A_B.png create mode 100644 doc/source/images/NOVA_clouds_A_B.svg create mode 100644 doc/source/images/NOVA_clouds_C1_C2.svg create mode 100644 doc/source/images/NOVA_clouds_C1_C2.svg.png create mode 100644 doc/source/images/Novadiagram.png create mode 100644 doc/source/images/base_scheduler.png create mode 100644 doc/source/images/cloudpipe.png create mode 100644 doc/source/images/fabric.png create mode 100644 doc/source/images/filteringWorkflow1.png create mode 100644 doc/source/images/filteringWorkflow2.png create mode 100644 doc/source/images/multinic_dhcp.png create mode 100644 doc/source/images/multinic_flat.png create mode 100644 doc/source/images/multinic_vlan.png create mode 100755 doc/source/images/nova.compute.api.create.png create mode 100644 doc/source/images/novascreens.png create mode 100644 doc/source/images/novashvirtually.png create mode 100644 doc/source/images/rpc/arch.png create mode 100644 doc/source/images/rpc/arch.svg create mode 100644 doc/source/images/rpc/flow1.png create mode 100644 doc/source/images/rpc/flow1.svg create mode 100644 doc/source/images/rpc/flow2.png create mode 100644 doc/source/images/rpc/flow2.svg create mode 100644 doc/source/images/rpc/rabt.png create mode 100644 doc/source/images/rpc/rabt.svg create mode 100644 doc/source/images/rpc/state.png create mode 100644 doc/source/images/vmwareapi_blockdiagram.jpg create mode 100755 doc/source/images/zone_aware_overview.png create mode 100644 doc/source/images/zone_aware_scheduler.png create mode 100755 doc/source/images/zone_overview.png create mode 100644 doc/source/index.rst create mode 100644 doc/source/man/nova-manage.rst create mode 100644 etc/cinder/api-paste.ini create mode 100644 etc/cinder/cinder.conf.sample create mode 100644 etc/cinder/logging_sample.conf create mode 100644 etc/cinder/policy.json create mode 100644 openstack-common.conf create mode 100644 pylintrc create mode 100755 run_tests.sh create mode 100644 setup.cfg create mode 100644 setup.py create mode 100755 tools/clean-vlans create mode 100755 tools/clean_file_locks.py create mode 100644 tools/conf/create_conf.py create mode 100755 tools/conf/generate_sample.sh create mode 100755 tools/enable-pre-commit-hook.sh create mode 100755 tools/hacking.py create mode 100644 tools/install_venv.py create mode 100644 tools/pip-requires create mode 100755 tools/rfc.sh create mode 100644 tools/test-requires create mode 100755 tools/with_venv.sh create mode 100644 tox.ini diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000000..97cda3c7c18 --- /dev/null +++ b/.gitignore @@ -0,0 +1,24 @@ +*.pyc +*.DS_Store +local_settings.py +CA/ +keeper +instances +keys +build/* +build-stamp +cinder.egg-info +nova.egg-info +.cinder-venv +.nova-venv +.venv +.tox +*.sqlite +*.log +*.mo +tools/conf/cinder.conf* +tools/conf/nova.conf* +cover/* +dist/* +.coverage +covhtml diff --git a/.gitreview b/.gitreview new file mode 100644 index 00000000000..eecf939449d --- /dev/null +++ b/.gitreview @@ -0,0 +1,4 @@ +[gerrit] +host=review.openstack.org +port=29418 +project=openstack/cinder.git diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000000..f38c68aeb49 --- /dev/null +++ b/.mailmap @@ -0,0 +1,81 @@ +# Format is: +# +# + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Masumoto + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff 
--git a/Authors b/Authors new file mode 100644 index 00000000000..f4bd7344f29 --- /dev/null +++ b/Authors @@ -0,0 +1,211 @@ +Aaron Lee +Adam Gandelman +Adam Johnson +Adrian Smith +Ahmad Hassan +Alex Meade +Alexander Sakhnov +Alexander Kovalev +Alvaro Lopez Garcia +Andrew Bogott +Andrew Clay Shafer +Andrey Brindeyev +Andy Smith +Andy Southgate +Anne Gentle +Ante Karamatić +Anthony Young +Antony Messerli +Armando Migliaccio +Arvind Somya +Asbjørn Sannes +Ben McGraw +Ben Swartzlander +Bilal Akhtar +Brad Hall +Brad McConnell +Brendan Maguire +Brian Elliott +Brian Lamar +Brian Schott +Brian Waldon +Chiradeep Vittal +Chmouel Boudjnah +Chris Behrens +Christian Berendt +Chris Fattarsi +Christopher MacGown +Chuck Short +Cole Robinson +Cor Cornelisse +Cory Wright +Dan Prince +Dan Wendlandt +Daniel P. Berrange +Dave Lapsley +Dave Walker +David Pravec +David Subiros +Dean Troyer +Deepak Garg +Derek Higgins +Devdeep Singh +Devendra Modium +Devin Carlen +Dina Belova +Donal Lafferty +Dong-In David Kang +Doug Hellmann +Duncan McGreggor +Ed Leafe +Edouard Thuleau +Eldar Nugaev +Eoghan Glynn +Eric Day +Eric Windisch +Evan Callicoat +Ewan Mellor +François Charlier +Gabe Westmaas +Gabriel Hurley +Gary Kotton +Gaurav Gupta +Greg Althaus +Hengqing Hu +Hisaharu Ishii +Hisaki Ohara +Ilya Alekseyev +Ionuț Arțăriși +Isaku Yamahata +Ivan Kolodyazhny +J. Daniel Schmidt +Jake Dahn +James E. Blair +Jason Cannavale +Jason Koelker +Jay Pipes +JC Martin +Jesse Andrews +Jimmy Bergman +Joe Gordon +Joe Heck +Joel Moore +Johannes Erdfelt +John Dewey +John Garbutt +John Griffith +John Kennedy +John Tran +Jonathan Bryce +Jordan Rinke +Joseph Suh +Joseph W. Breu +Josh Durgin +Josh Kearney +Josh Kleinpeter +Joshua Harlow +Joshua McKenty +Juan G. Hernando Rivero +Julien Danjou +Justin Santa Barbara +Justin Shepherd +Kei Masumoto +Keisuke Tagami +masumoto +masukotm +Ken Pepple +Kevin Bringard +Kevin L. 
Mitchell
+Kiall Mac Innes
+Kirill Shileev
+Koji Iida
+Liam Kelleher
+Likitha Shetty
+Loganathan Parthipan
+Lorin Hochstein
+Lvov Maxim
+Mandar Vaze
+Mandell Degerness
+Mark McClain
+Mark McLoughlin
+Mark Washenberger
+Maru Newby
+Masanori Itoh
+Matt Dietz
+Matt Stephenson
+Matthew Hooker
+Michael Basnight
+Michael Gundlach
+Michael Still
+Mike Lundy
+Mike Milner
+Mike Pittaro
+Mike Scherbakov
+Mikyung Kang
+Mohammed Naser
+Monsyne Dragon
+Monty Taylor
+MORITA Kazutaka
+MotoKen
+Muneyuki Noguchi
+Nachi Ueno
+Naveed Massjouni
+Nick Bartos
+Nikhil Komawar
+Nikolay Sokolov
+Nirmal Ranganathan
+Ollie Leahy
+Pádraig Brady
+Paul McMillan
+Paul Voccio
+Peng Yong
+Philip Knouff
+Renier Morales
+Renuka Apte
+Ricardo Carrillo Cruz
+Rick Clark
+Rick Harris
+Rob Kost
+Robert Esker
+Russell Bryant
+Russell Sim
+Ryan Lane
+Ryan Lucio
+Ryu Ishimoto
+Salvatore Orlando
+Sandy Walsh
+Sateesh Chodapuneedi
+Scott Moser
+Sean Dague
+Soren Hansen
+Stanislaw Pitucha
+Stephanie Reese
+Sumit Naiksatam
+Thierry Carrez
+Tim Simpson
+Todd Willey
+Tomoe Sugihara
+Tomoya Masuko
+Thorsten Tarrach
+Trey Morris
+Troy Toman
+Tushar Patil
+Unmesh Gurjar
+Vasiliy Shlykov
+Vishvananda Ishaya
+Vivek Y S
+Vladimir Popovski
+Vaddi kiran
+William Henry
+William Kelly
+William Wolf
+Yaguang Tang
+Yoshiaki Tamura
+Youcef Laribi
+Yun Mao
+Yun Shen
+Yuriy Taraday
+Zed Shaw
+Zhixue Wu
+Zhongyue Luo
+Ziad Sawalha
diff --git a/HACKING.rst b/HACKING.rst
new file mode 100644
index 00000000000..e9c0162f54e
--- /dev/null
+++ b/HACKING.rst
@@ -0,0 +1,213 @@
+Cinder Style Commandments
+=========================
+
+- Step 1: Read http://www.python.org/dev/peps/pep-0008/
+- Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
+- Step 3: Read on
+
+
+General
+-------
+- Put two newlines between top-level code (funcs, classes, etc)
+- Put one newline between methods in classes and anywhere else
+- Do not write "except:", use "except Exception:" at the very least
+- Include your name with TODOs as in "#TODO(termie)"
+- Do not shadow a built-in or reserved word. Example::
+
+    def list():
+        return [1, 2, 3]
+
+    mylist = list() # BAD, shadows `list` built-in
+
+    class Foo(object):
+        def list(self):
+            return [1, 2, 3]
+
+    mylist = Foo().list() # OKAY, does not shadow built-in
+
+
+Imports
+-------
+- Do not import objects, only modules (*)
+- Do not import more than one module per line (*)
+- Do not make relative imports
+- Order your imports by the full module path
+- Organize your imports according to the following template
+
+(*) exceptions are:
+
+- imports from ``migrate`` package
+- imports from ``sqlalchemy`` package
+- imports from ``cinder.db.sqlalchemy.session`` module
+
+Example::
+
+  # vim: tabstop=4 shiftwidth=4 softtabstop=4
+  {{stdlib imports in human alphabetical order}}
+  \n
+  {{third-party lib imports in human alphabetical order}}
+  \n
+  {{cinder imports in human alphabetical order}}
+  \n
+  \n
+  {{begin your code}}
+
+
+Human Alphabetical Order Examples
+---------------------------------
+Example::
+
+  import httplib
+  import logging
+  import random
+  import StringIO
+  import time
+  import unittest
+
+  import eventlet
+  import webob.exc
+
+  import cinder.api.ec2
+  from cinder.api import openstack
+  from cinder.auth import users
+  from cinder.endpoint import cloud
+  import cinder.flags
+  from cinder import test
+
+
+Docstrings
+----------
+Example::
+
+  """A one line docstring looks like this and ends in a period."""
+
+
+  """A multi line docstring has a one-line summary, less than 80 characters.
+
+  Then a new paragraph after a newline that explains in more detail any
+  general information about the function, class or method. Example usages
+  are also great to have here if it is a complex class or function.
+
+  When writing the docstring for a class, an extra line should be placed
+  after the closing quotations. For more in-depth explanations of these
+  decisions, see http://www.python.org/dev/peps/pep-0257/
+
+  If you are going to describe parameters and return values, use Sphinx;
+  the appropriate syntax is as follows.
+
+  :param foo: the foo parameter
+  :param bar: the bar parameter
+  :returns: return_type -- description of the return value
+  :returns: description of the return value
+  :raises: AttributeError, KeyError
+  """
+
+
+Dictionaries/Lists
+------------------
+If a dictionary (dict) or list object is longer than 80 characters, its items
+should be split with newlines. Embedded iterables should have their items
+indented. Additionally, the last item in the dictionary should have a trailing
+comma. This increases readability and simplifies future diffs.
+
+Example::
+
+  my_dictionary = {
+      "image": {
+          "name": "Just a Snapshot",
+          "size": 2749573,
+          "properties": {
+              "user_id": 12,
+              "arch": "x86_64",
+          },
+          "things": [
+              "thing_one",
+              "thing_two",
+          ],
+          "status": "ACTIVE",
+      },
+  }
+
+
+Calling Methods
+---------------
+Method calls that run 80 characters or longer should place each argument on
+its own line. This is not a requirement, but a guideline::
+
+    unnecessarily_long_function_name('string one',
+                                     'string two',
+                                     kwarg1=constants.ACTIVE,
+                                     kwarg2=['a', 'b', 'c'])
+
+
+Rather than constructing parameters inline, it is better to break things up::
+
+    list_of_strings = [
+        'what_a_long_string',
+        'not as long',
+    ]
+
+    dict_of_numbers = {
+        'one': 1,
+        'two': 2,
+        'twenty four': 24,
+    }
+
+    object_one.call_a_method('string three',
+                             'string four',
+                             kwarg1=list_of_strings,
+                             kwarg2=dict_of_numbers)
+
+
+Internationalization (i18n) Strings
+-----------------------------------
+In order to support multiple languages, we have a mechanism for automatic
+translation of exception and log strings.
+
+Example::
+
+    msg = _("An error occurred")
+    raise HTTPBadRequest(explanation=msg)
+
+If you have a variable to place within the string, first internationalize the
+template string, then do the replacement.
+
+Example::
+
+    msg = _("Missing parameter: %s") % ("flavor",)
+    LOG.error(msg)
+
+If you have multiple variables to place in the string, use keyword parameters.
+This helps our translators reorder parameters when needed.
+
+Example::
+
+    msg = _("The server with id %(s_id)s has no key %(m_key)s")
+    LOG.error(msg % {"s_id": "1234", "m_key": "imageId"})
+
+
+Creating Unit Tests
+-------------------
+For every new feature, unit tests should be created that both test and
+(implicitly) document the usage of said feature. If submitting a patch for a
+bug that had no unit test, a new passing unit test should be added. If a
+submitted bug fix does have a unit test, be sure to add a new one that fails
+without the patch and passes with the patch.
+
+For more information on creating unit tests and utilizing the testing
+infrastructure in OpenStack Cinder, please read cinder/testing/README.rst.
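+
+As a minimal, self-contained illustration (the helper and test names below
+are invented for this example and are not actual Cinder code)::
+
+    import unittest
+
+
+    def _gigabytes_total(sizes):
+        """Hypothetical helper under test: sums volume sizes in GB."""
+        return sum(sizes)
+
+
+    class GigabytesTotalTestCase(unittest.TestCase):
+        def test_gigabytes_total(self):
+            # For a bug fix, write this assertion so that it fails
+            # without the fix and passes with it.
+            self.assertEqual(_gigabytes_total([1, 2, 3]), 6)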
See: + + http://wiki.openstack.org/CommonLibrary#Incubation + +The copy of the code should never be directly modified here. Please +always update openstack-common first and then run the script to copy +the changes across. diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000000..68c771a0999 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000000..97278f3f4ab
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,37 @@
+include HACKING.rst
+include LICENSE run_tests.sh
+include README.rst
+include MANIFEST.in pylintrc Authors
+include openstack-common.conf
+include babel.cfg tox.ini
+graft cinder/CA
+graft doc
+graft smoketests
+graft tools
+graft etc
+graft contrib
+graft plugins
+graft cinder/api/openstack/*/schemas
+include cinder/auth/*.schema
+include cinder/auth/cinderrc.template
+include cinder/auth/opendj.sh
+include cinder/auth/slap.sh
+include cinder/db/sqlalchemy/migrate_repo/migrate.cfg
+include cinder/db/sqlalchemy/migrate_repo/README
+include cinder/db/sqlalchemy/migrate_repo/versions/*.sql
+include cinder/openstack/common/README
+include cinder/virt/interfaces.template
+include cinder/virt/libvirt*.xml.template
+include cinder/virt/cpuinfo.xml.template
+include cinder/testing/README.rst
+include cinder/tests/db/cinder.austin.sqlite
+include cinder/tests/image/*.tar.gz
+include cinder/tests/policy.json
+include cinder/tests/test_migrations.conf
+include cinder/tests/xenapi/vm_rrd.xml
+include plugins/xenapi/README
+include plugins/xenapi/etc/xapi.d/plugins/objectstore
+include plugins/xenapi/etc/xapi.d/plugins/pluginlib_cinder.py
+global-exclude *.pyc
+
+recursive-include cinder/locale *
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000000..822401a80ae
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,21 @@
+The Choose Your Own Adventure README for Cinder
+===============================================
+
+You have come across a storage service for an open cloud computing service.
+It has identified itself as "Cinder." It was abstracted from the Nova project.
+
+To monitor it from a distance: follow `@openstack <http://twitter.com/openstack>`_ on twitter.
+
+To tame it for use in your own cloud: read http://docs.openstack.org
+
+To study its anatomy: read http://cinder.openstack.org
+
+To dissect it in detail: visit http://github.com/openstack/cinder
+
+To taunt it with its weaknesses: use http://bugs.launchpad.net/cinder
+
+To watch it: http://jenkins.openstack.org
+
+To hack at it: read HACKING
+
+To cry over its pylint problems: http://jenkins.openstack.org/job/cinder-pylint/violations
diff --git a/babel.cfg b/babel.cfg
new file mode 100644
index 00000000000..15cd6cb76b9
--- /dev/null
+++ b/babel.cfg
@@ -0,0 +1,2 @@
+[python: **.py]
+
diff --git a/bin/cinder-all b/bin/cinder-all
new file mode 100755
index 00000000000..8bec9bbb39b
--- /dev/null
+++ b/bin/cinder-all
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack, LLC
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Starter script for all cinder services.
+
+This script attempts to start all the cinder services in one process. Each
+service is started in its own greenthread.
Please note that exceptions and +sys.exit() on the starting of a service are logged and the script will +continue attempting to launch the rest of the services. + +""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + + +from cinder import flags +from cinder import log as logging +from cinder import service +from cinder import utils + + +LOG = logging.getLogger('cinder.all') + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + utils.monkey_patch() + servers = [] + # cinder-api + for api in flags.FLAGS.enabled_apis: + try: + servers.append(service.WSGIService(api)) + except (Exception, SystemExit): + logging.exception(_('Failed to load %s') % '%s-api' % api) + + for binary in ['cinder-volume', 'cinder-scheduler']: + try: + servers.append(service.Service.create(binary=binary)) + except (Exception, SystemExit): + LOG.exception(_('Failed to load %s'), binary) + service.serve(*servers) + service.wait() diff --git a/bin/cinder-api b/bin/cinder-api new file mode 100755 index 00000000000..ba28b1a445d --- /dev/null +++ b/bin/cinder-api @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for Cinder OS API.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + + +possible_topdir = os.path.normpath(os.path.join(os.path.abspath( + sys.argv[0]), os.pardir, os.pardir)) +if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + + +from cinder import flags +from cinder import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + utils.monkey_patch() + server = service.WSGIService('osapi_volume') + service.serve(server) + service.wait() diff --git a/bin/cinder-manage b/bin/cinder-manage new file mode 100755 index 00000000000..0cc6d82c892 --- /dev/null +++ b/bin/cinder-manage @@ -0,0 +1,635 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Interactive shell based on Django: +# +# Copyright (c) 2005, the Lawrence Journal-World +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# 3. Neither the name of Django nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +""" + CLI interface for cinder management. +""" + +import ast +import errno +import gettext +import json +import math +import netaddr +import optparse +import os +import StringIO +import sys + + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +gettext.install('cinder', unicode=1) + +from cinder.compat import flagfile +from cinder import context +from cinder import db +from cinder.db import migration +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import importutils +from cinder import quota +from cinder import rpc +from cinder import utils +from cinder import version +from cinder.volume import volume_types + +FLAGS = flags.FLAGS + + +# Decorators for actions +def args(*args, **kwargs): + def _decorator(func): + func.__dict__.setdefault('options', []).insert(0, (args, kwargs)) + return func + return _decorator + + +def param2id(object_id): + """Helper function to convert various id types to internal id. + args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10' + """ + if '-' in object_id: + # FIXME(ja): mapping occurs in nova? 
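+        # NOTE: prefixed ids such as 'vol-0000000a' are not converted here
+        # yet (see the FIXME above), so this branch falls through and the
+        # function returns None for them.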
+        pass
+    else:
+        return int(object_id)
+
+
+class ShellCommands(object):
+    def bpython(self):
+        """Runs a bpython shell.
+
+        Falls back to IPython/python shell if unavailable"""
+        self.run('bpython')
+
+    def ipython(self):
+        """Runs an IPython shell.
+
+        Falls back to Python shell if unavailable"""
+        self.run('ipython')
+
+    def python(self):
+        """Runs a plain python shell."""
+        self.run('python')
+
+    @args('--shell', dest="shell", metavar='<bpython|ipython|python>',
+          help='Python shell')
+    def run(self, shell=None):
+        """Runs a Python interactive interpreter."""
+        if not shell:
+            shell = 'bpython'
+
+        if shell == 'bpython':
+            try:
+                import bpython
+                bpython.embed()
+            except ImportError:
+                shell = 'ipython'
+        if shell == 'ipython':
+            try:
+                import IPython
+                # Explicitly pass an empty list as arguments, because
+                # otherwise IPython would use sys.argv from this script.
+                shell = IPython.Shell.IPShell(argv=[])
+                shell.mainloop()
+            except ImportError:
+                shell = 'python'
+
+        if shell == 'python':
+            import code
+            try:
+                # Try activating rlcompleter, because it's handy.
+                import readline
+            except ImportError:
+                pass
+            else:
+                # We don't have to wrap the following import in a 'try',
+                # because we already know 'readline' was imported successfully.
+                import rlcompleter
+                readline.parse_and_bind("tab:complete")
+            code.interact()
+
+    @args('--path', dest='path', metavar='<path>', help='Script path')
+    def script(self, path):
+        """Runs the script from the specified path with flags set properly.
+        arguments: path"""
+        exec(compile(open(path).read(), path, 'exec'), locals(), globals())
+
+
+def _db_error(caught_exception):
+    print caught_exception
+    print _("The above error may show that the database has not "
+            "been created.\nPlease create a database using "
+            "'cinder-manage db sync' before running this command.")
+    exit(1)
+
+
+class HostCommands(object):
+    """List hosts"""
+
+    def list(self, zone=None):
+        """Show a list of all physical hosts. Filter by zone.
+        args: [zone]"""
+        print "%-25s\t%-15s" % (_('host'),
+                                _('zone'))
+        ctxt = context.get_admin_context()
+        services = db.service_get_all(ctxt)
+        if zone:
+            services = [s for s in services if s['availability_zone'] == zone]
+        hosts = []
+        for srv in services:
+            if not [h for h in hosts if h['host'] == srv['host']]:
+                hosts.append(srv)
+
+        for h in hosts:
+            print "%-25s\t%-15s" % (h['host'], h['availability_zone'])
+
+
+class DbCommands(object):
+    """Class for managing the database."""
+
+    def __init__(self):
+        pass
+
+    @args('--version', dest='version', metavar='<version>',
+          help='Database version')
+    def sync(self, version=None):
+        """Sync the database up to the most recent version."""
+        return migration.db_sync(version)
+
+    def version(self):
+        """Print the current database version."""
+        print migration.db_version()
+
+
+class VersionCommands(object):
+    """Class for exposing the codebase version."""
+
+    def __init__(self):
+        pass
+
+    def list(self):
+        print _("%(version)s (%(vcs)s)") % \
+                {'version': version.version_string(),
+                 'vcs': version.version_string_with_vcs()}
+
+    def __call__(self):
+        self.list()
+
+
+class VolumeCommands(object):
+    """Methods for dealing with a cloud in an odd state."""
+
+    @args('--volume', dest='volume_id', metavar='<volume id>',
+          help='Volume ID')
+    def delete(self, volume_id):
+        """Delete a volume, bypassing the check that it
+        must be available."""
+        ctxt = context.get_admin_context()
+        volume = db.volume_get(ctxt, param2id(volume_id))
+        host = volume['host']
+
+        if not host:
+            print "Volume not yet assigned to host."
+            print "Deleting volume from database and skipping rpc."
+            db.volume_destroy(ctxt, param2id(volume_id))
+            return
+
+        if volume['status'] == 'in-use':
+            print "Volume is in-use."
+            print "Detach volume from instance and then try again."
+            return
+
+        rpc.cast(ctxt,
+                 db.queue_get_for(ctxt, FLAGS.volume_topic, host),
+                 {"method": "delete_volume",
+                  "args": {"volume_id": volume['id']}})
+
+    @args('--volume', dest='volume_id', metavar='<volume id>',
+          help='Volume ID')
+    def reattach(self, volume_id):
+        """Re-attach a volume that has previously been attached
+        to an instance. Typically called after a compute host
+        has been rebooted."""
+        ctxt = context.get_admin_context()
+        volume = db.volume_get(ctxt, param2id(volume_id))
+        if not volume['instance_id']:
+            print "Volume is not attached to an instance."
+            return
+        instance = db.instance_get(ctxt, volume['instance_id'])
+        host = instance['host']
+        rpc.cast(ctxt,
+                 db.queue_get_for(ctxt, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume['id'],
+                           "mountpoint": volume['mountpoint']}})
+
+
+class StorageManagerCommands(object):
+    """Class for managing Storage Backends and Flavors"""
+
+    def flavor_list(self, flavor=None):
+        ctxt = context.get_admin_context()
+
+        try:
+            if flavor is None:
+                flavors = db.sm_flavor_get_all(ctxt)
+            else:
+                flavors = db.sm_flavor_get(ctxt, flavor)
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        print "%-18s\t%-20s\t%s" % (_('id'),
+                                    _('Label'),
+                                    _('Description'))
+
+        for flav in flavors:
+            print "%-18s\t%-20s\t%s" % (
+                flav['id'],
+                flav['label'],
+                flav['description'])
+
+    def flavor_create(self, label, desc):
+        # TODO(renukaapte) flavor name must be unique
+        try:
+            db.sm_flavor_create(context.get_admin_context(),
+                                dict(label=label,
+                                     description=desc))
+        except exception.DBError as e:
+            _db_error(e)
+
+    def flavor_delete(self, label):
+        try:
+            db.sm_flavor_delete(context.get_admin_context(), label)
+
+        except exception.DBError as e:
+            _db_error(e)
+
+    def _splitfun(self, item):
+        i = item.split("=")
+        return i[0:2]
+
+    def backend_list(self, backend_conf_id=None):
+        ctxt = context.get_admin_context()
+
+        try:
+            if backend_conf_id is None:
+                backends = db.sm_backend_conf_get_all(ctxt)
+            else:
+                backends = db.sm_backend_conf_get(ctxt, backend_conf_id)
+
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (_('id'),
+                                                 _('Flavor id'),
+                                                 _('SR UUID'),
+                                                 _('SR Type'),
+                                                 _('Config Parameters'),)
+
+        for b in backends:
+            print "%-5s\t%-10s\t%-40s\t%-10s\t%s" % (b['id'],
+                                                     b['flavor_id'],
+                                                     b['sr_uuid'],
+                                                     b['sr_type'],
+                                                     b['config_params'],)
+
+    def backend_add(self, flavor_label, sr_type, *args):
+        # TODO(renukaapte) Add backend_introduce.
+        ctxt = context.get_admin_context()
+        params = dict(map(self._splitfun, args))
+        sr_uuid = utils.gen_uuid()
+
+        if flavor_label is None:
+            print "error: backend needs to be associated with flavor"
+            sys.exit(2)
+
+        try:
+            flavors = db.sm_flavor_get(ctxt, flavor_label)
+
+        except exception.NotFound as ex:
+            print "error: %s" % ex
+            sys.exit(2)
+
+        config_params = " ".join(['%s=%s' %
+                                  (key, params[key]) for key in params])
+
+        if 'sr_uuid' in params:
+            sr_uuid = params['sr_uuid']
+            try:
+                backend = db.sm_backend_conf_get_by_sr(ctxt, sr_uuid)
+            except exception.DBError as e:
+                _db_error(e)
+
+            if backend:
+                print 'Backend config found. Would you like to recreate this?'
+                print '(WARNING: Recreating will destroy all VDIs on backend!!)'
+                c = raw_input('Proceed? (y/n) ')
+                if c == 'y' or c == 'Y':
+                    try:
+                        db.sm_backend_conf_update(ctxt, backend['id'],
+                                    dict(created=False,
+                                         flavor_id=flavors['id'],
+                                         sr_type=sr_type,
+                                         config_params=config_params))
+                    except exception.DBError as e:
+                        _db_error(e)
+                return
+
+        else:
+            print 'Backend config not found. Would you like to create it?'
+            print '(WARNING: Creating will destroy all data on backend!!!)'
+            c = raw_input('Proceed? (y/n) ')
+            if c == 'y' or c == 'Y':
+                try:
+                    db.sm_backend_conf_create(ctxt,
+                                dict(flavor_id=flavors['id'],
+                                     sr_uuid=sr_uuid,
+                                     sr_type=sr_type,
+                                     config_params=config_params))
+                except exception.DBError as e:
+                    _db_error(e)
+
+    def backend_remove(self, backend_conf_id):
+        try:
+            db.sm_backend_conf_delete(context.get_admin_context(),
+                                      backend_conf_id)
+
+        except exception.DBError as e:
+            _db_error(e)
+
+
+class ConfigCommands(object):
+    """Class for exposing the flags defined by flag_file(s)."""
+
+    def __init__(self):
+        pass
+
+    def list(self):
+        for key, value in FLAGS.iteritems():
+            if value is not None:
+                print '%s = %s' % (key, value)
+
+    @args('--infile', dest='infile', metavar='<path>',
+          help='old-style flagfile to convert to config')
+    @args('--outfile', dest='outfile', metavar='<path>',
+          help='path for output file. Writes config '
+               'to stdout if not specified.')
+    def convert(self, infile, outfile=None):
+        """Converts a flagfile and prints results to stdout."""
+        arg = '--flagfile=%s' % infile
+        with flagfile.handle_flagfiles_managed([arg]) as newargs:
+            with open(newargs[0].split('=')[1]) as configfile:
+                config = configfile.read()
+        if outfile:
+            with open(outfile, 'w') as configfile:
+                configfile.write(config)
+        else:
+            print config,
+
+
+class GetLogCommands(object):
+    """Get logging information"""
+
+    def errors(self):
+        """Get all of the errors from the log files"""
+        error_found = 0
+        if FLAGS.logdir:
+            logs = [x for x in os.listdir(FLAGS.logdir) if x.endswith('.log')]
+            # NOTE: do not call the loop variable 'file'; that would shadow
+            # the built-in (see HACKING.rst).
+            for log in logs:
+                log_file = os.path.join(FLAGS.logdir, log)
+                lines = [line.strip() for line in open(log_file, "r")]
+                lines.reverse()
+                print_name = 0
+                for index, line in enumerate(lines):
+                    if line.find(" ERROR ") > 0:
+                        error_found += 1
+                        if print_name == 0:
+                            print log_file + ":-"
+                            print_name = 1
+                        print "Line %d : %s" % (len(lines) - index, line)
+        if error_found == 0:
+            print "No errors in logfiles!"
+
+    def syslog(self, num_entries=10):
+        """Get the last <num_entries> cinder syslog entries"""
+        entries = int(num_entries)
+        count = 0
+        log_file = ''
+        if os.path.exists('/var/log/syslog'):
+            log_file = '/var/log/syslog'
+        elif os.path.exists('/var/log/messages'):
+            log_file = '/var/log/messages'
+        else:
+            print "Unable to find system log file!"
+            sys.exit(1)
+        lines = [line.strip() for line in open(log_file, "r")]
+        lines.reverse()
+        print "Last %s cinder syslog entries:-" % (entries)
+        for line in lines:
+            if line.find("cinder") > 0:
+                count += 1
+                print "%s" % (line)
+                if count == entries:
+                    break
+
+        if count == 0:
+            print "No cinder entries in syslog!"
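+
+# A minimal, illustrative sketch (FooCommands and frob are hypothetical
+# names, not part of the tool): a command category is a plain class whose
+# public methods become actions, and each @args decorator contributes an
+# optparse option that main() wires up below.
+#
+#     class FooCommands(object):
+#         @args('--bar', dest='bar', metavar='<bar>', help='Bar value')
+#         def frob(self, bar=None):
+#             """Invoked as: cinder-manage foo frob --bar=baz"""
+#             print bar
+#
+# Registering ('foo', FooCommands) in CATEGORIES would expose it as the
+# 'foo' category.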
+
+
+CATEGORIES = [
+    ('config', ConfigCommands),
+    ('db', DbCommands),
+    ('host', HostCommands),
+    ('logs', GetLogCommands),
+    ('shell', ShellCommands),
+    ('sm', StorageManagerCommands),
+    ('version', VersionCommands),
+    ('volume', VolumeCommands),
+]
+
+
+def lazy_match(name, key_value_tuples):
+    """Finds all objects that have a key that case-insensitively starts
+    with [name]. key_value_tuples is a list of tuples of the form
+    (key, value). Returns a list of tuples of the form (key, value)."""
+    result = []
+    for (k, v) in key_value_tuples:
+        if k.lower().find(name.lower()) == 0:
+            result.append((k, v))
+    if len(result) == 0:
+        print "%s does not match any options:" % name
+        for k, _v in key_value_tuples:
+            print "\t%s" % k
+        sys.exit(2)
+    if len(result) > 1:
+        print "%s matched multiple options:" % name
+        for k, _v in result:
+            print "\t%s" % k
+        sys.exit(2)
+    return result
+
+
+def methods_of(obj):
+    """Get all callable methods of an object that don't start with
+    underscore. Returns a list of tuples of the form
+    (method_name, method)."""
+    result = []
+    for i in dir(obj):
+        if callable(getattr(obj, i)) and not i.startswith('_'):
+            result.append((i, getattr(obj, i)))
+    return result
+
+
+def main():
+    """Parse options and call the appropriate class/method."""
+    flagfile = utils.default_flagfile()
+
+    if flagfile and not os.access(flagfile, os.R_OK):
+        st = os.stat(flagfile)
+        print "Could not read %s. Re-running with sudo" % flagfile
+        try:
+            os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
+        except Exception:
+            print 'sudo failed, continuing as if nothing happened'
+
+    rpc.register_opts(FLAGS)
+
+    try:
+        argv = FLAGS(sys.argv)
+        logging.setup()
+    except IOError as e:
+        if e.errno == errno.EACCES:
+            print _('Please re-run cinder-manage as root.')
+            sys.exit(2)
+        raise
+    script_name = argv.pop(0)
+    if len(argv) < 1:
+        print _("\nOpenStack Cinder version: %(version)s (%(vcs)s)\n") % \
+                {'version': version.version_string(),
+                 'vcs': version.version_string_with_vcs()}
+        print script_name + " category action [<args>]"
+        print _("Available categories:")
+        for k, _v in CATEGORIES:
+            print "\t%s" % k
+        sys.exit(2)
+    category = argv.pop(0)
+    matches = lazy_match(category, CATEGORIES)
+    # instantiate the command group object
+    category, fn = matches[0]
+    command_object = fn()
+    actions = methods_of(command_object)
+    if len(argv) < 1:
+        if hasattr(command_object, '__call__'):
+            action = ''
+            fn = command_object.__call__
+        else:
+            print script_name + " category action [<args>]"
+            print _("Available actions for %s category:") % category
+            for k, _v in actions:
+                print "\t%s" % k
+            sys.exit(2)
+    else:
+        action = argv.pop(0)
+        matches = lazy_match(action, actions)
+        action, fn = matches[0]
+
+    # For not decorated methods
+    options = getattr(fn, 'options', [])
+
+    usage = "%%prog %s %s <args> [options]" % (category, action)
+    parser = optparse.OptionParser(usage=usage)
+    for ar, kw in options:
+        parser.add_option(*ar, **kw)
+    (opts, fn_args) = parser.parse_args(argv)
+    fn_kwargs = vars(opts)
+
+    for k, v in fn_kwargs.items():
+        if v is None:
+            del fn_kwargs[k]
+        elif isinstance(v, basestring):
+            fn_kwargs[k] = v.decode('utf-8')
+        else:
+            fn_kwargs[k] = v
+
+    fn_args = [arg.decode('utf-8') for arg in fn_args]
+
+    # call the action with the remaining arguments
+    try:
+        fn(*fn_args, **fn_kwargs)
+        rpc.cleanup()
+        sys.exit(0)
+    except TypeError:
+        print _("Possible wrong number of arguments supplied")
+        print fn.__doc__
+        parser.print_help()
+        raise
+    except Exception:
+        print _("Command failed, please check log for
more info") + raise + +if __name__ == '__main__': + main() diff --git a/bin/cinder-rootwrap b/bin/cinder-rootwrap new file mode 100755 index 00000000000..537324c6c10 --- /dev/null +++ b/bin/cinder-rootwrap @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Root wrapper for Cinder + + Uses modules in cinder.rootwrap containing filters for commands + that cinder is allowed to run as another user. + + To switch to using this, you should: + * Set "--root_helper=sudo cinder-rootwrap" in cinder.conf + * Allow cinder to run cinder-rootwrap as root in cinder_sudoers: + cinder ALL = (root) NOPASSWD: /usr/bin/cinder-rootwrap + (all other commands can be removed from this file) + + To make allowed commands node-specific, your packaging should only + install cinder/rootwrap/{compute,network,volume}.py respectively on + compute, network and volume nodes (i.e. cinder-api nodes should not + have any of those files installed). +""" + +import os +import subprocess +import sys + + +RC_UNAUTHORIZED = 99 +RC_NOCOMMAND = 98 + +if __name__ == '__main__': + # Split arguments, require at least a command + execname = sys.argv.pop(0) + if len(sys.argv) == 0: + print "%s: %s" % (execname, "No command specified") + sys.exit(RC_NOCOMMAND) + + userargs = sys.argv[:] + + # Add ../ to sys.path to allow running from branch + possible_topdir = os.path.normpath(os.path.join(os.path.abspath(execname), + os.pardir, os.pardir)) + if os.path.exists(os.path.join(possible_topdir, "cinder", "__init__.py")): + sys.path.insert(0, possible_topdir) + + from cinder.rootwrap import wrapper + + # Execute command if it matches any of the loaded filters + filters = wrapper.load_filters() + filtermatch = wrapper.match_filter(filters, userargs) + if filtermatch: + obj = subprocess.Popen(filtermatch.get_command(userargs), + stdin=sys.stdin, + stdout=sys.stdout, + stderr=sys.stderr, + env=filtermatch.get_environment(userargs)) + obj.wait() + sys.exit(obj.returncode) + + print "Unauthorized command: %s" % ' '.join(userargs) + sys.exit(RC_UNAUTHORIZED) diff --git a/bin/cinder-scheduler b/bin/cinder-scheduler new file mode 100755 index 00000000000..f423bef63e9 --- /dev/null +++ b/bin/cinder-scheduler @@ -0,0 +1,51 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Cinder Scheduler.""" + +import eventlet +eventlet.monkey_patch() + +import gettext +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + +gettext.install('cinder', unicode=1) + +from cinder import flags +from cinder import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + utils.monkey_patch() + server = service.Service.create(binary='cinder-scheduler') + service.serve(server) + service.wait() diff --git a/bin/cinder-volume b/bin/cinder-volume new file mode 100755 index 00000000000..53aa635a6fa --- /dev/null +++ b/bin/cinder-volume @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Cinder Volume.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + +# If ../cinder/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'cinder', '__init__.py')): + sys.path.insert(0, possible_topdir) + + +from cinder import flags +from cinder import log as logging +from cinder import service +from cinder import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + utils.monkey_patch() + server = service.Service.create(binary='cinder-volume') + service.serve(server) + service.wait() diff --git a/bin/clear_rabbit_queues b/bin/clear_rabbit_queues new file mode 100755 index 00000000000..d652d6e14c7 --- /dev/null +++ b/bin/clear_rabbit_queues @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Admin/debug script to wipe RabbitMQ (AMQP) queues cinder uses.
+   This can be used if you need to change durable options on queues,
+   or to wipe all messages in the queue system if things are in a
+   seriously bad way.
+
+"""
+
+import datetime
+import gettext
+import os
+import sys
+import time
+
+# If ../cinder/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
+if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'cinder', '__init__.py')):
+    sys.path.insert(0, POSSIBLE_TOPDIR)
+
+gettext.install('cinder', unicode=1)
+
+
+from cinder import context
+from cinder import exception
+from cinder import flags
+from cinder import log as logging
+from cinder.openstack.common import cfg
+from cinder import rpc
+from cinder import utils
+
+
+delete_exchange_opt = \
+    cfg.BoolOpt('delete_exchange',
+                default=False,
+                help='delete cinder exchange too.')
+
+FLAGS = flags.FLAGS
+FLAGS.register_cli_opt(delete_exchange_opt)
+
+
+def delete_exchange(exch):
+    conn = rpc.create_connection()
+    x = conn.get_channel()
+    x.exchange_delete(exch)
+
+
+def delete_queues(queues):
+    conn = rpc.create_connection()
+    x = conn.get_channel()
+    for q in queues:
+        x.queue_delete(q)
+
+if __name__ == '__main__':
+    utils.default_flagfile()
+    args = flags.FLAGS(sys.argv)
+    logging.setup()
+    rpc.register_opts(flags.FLAGS)
+    delete_queues(args[1:])
+    if FLAGS.delete_exchange:
+        delete_exchange(FLAGS.control_exchange)
diff --git a/cinder/__init__.py b/cinder/__init__.py
new file mode 100644
index 00000000000..238c2812e69
--- /dev/null
+++ b/cinder/__init__.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`cinder` -- Cloud IaaS Platform
+====================================
+
+.. automodule:: cinder
+   :platform: Unix
+   :synopsis: Infrastructure-as-a-Service Cloud platform.
+.. moduleauthor:: Jesse Andrews
+.. moduleauthor:: Devin Carlen
+.. moduleauthor:: Vishvananda Ishaya
+.. moduleauthor:: Joshua McKenty
+.. moduleauthor:: Manish Singh
+..
moduleauthor:: Andy Smith +""" + +import gettext +import logging + + +gettext.install('cinder', unicode=1) +# NOTE(jkoelker) This configures the root logger if it is not already +# configured so messages from logging setup can be written +# to the console +logging.basicConfig(format='%(message)s') diff --git a/cinder/api/__init__.py b/cinder/api/__init__.py new file mode 100644 index 00000000000..747015af53e --- /dev/null +++ b/cinder/api/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/api/auth.py b/cinder/api/auth.py new file mode 100644 index 00000000000..1b8f303e23c --- /dev/null +++ b/cinder/api/auth.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Common Auth Middleware. + +""" + +import webob.dec +import webob.exc + +from cinder import context +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder import wsgi + + +use_forwarded_for_opt = cfg.BoolOpt('use_forwarded_for', + default=False, + help='Treat X-Forwarded-For as the canonical remote address. 
'
+             'Only enable this if you have a sanitizing proxy.')
+
+FLAGS = flags.FLAGS
+FLAGS.register_opt(use_forwarded_for_opt)
+LOG = logging.getLogger(__name__)
+
+
+def pipeline_factory(loader, global_conf, **local_conf):
+    """A paste pipeline replica that keys off of auth_strategy."""
+    pipeline = local_conf[FLAGS.auth_strategy]
+    if not FLAGS.api_rate_limit:
+        limit_name = FLAGS.auth_strategy + '_nolimit'
+        pipeline = local_conf.get(limit_name, pipeline)
+    pipeline = pipeline.split()
+    filters = [loader.get_filter(n) for n in pipeline[:-1]]
+    app = loader.get_app(pipeline[-1])
+    filters.reverse()
+    # Wrap the app in each filter; avoid shadowing the filter() built-in.
+    for app_filter in filters:
+        app = app_filter(app)
+    return app
+
+
+class InjectContext(wsgi.Middleware):
+    """Add a 'cinder.context' to WSGI environ."""
+
+    def __init__(self, context, *args, **kwargs):
+        self.context = context
+        super(InjectContext, self).__init__(*args, **kwargs)
+
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
+    def __call__(self, req):
+        req.environ['cinder.context'] = self.context
+        return self.application
+
+
+class CinderKeystoneContext(wsgi.Middleware):
+    """Make a request context from keystone headers."""
+
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
+    def __call__(self, req):
+        user_id = req.headers.get('X_USER')
+        user_id = req.headers.get('X_USER_ID', user_id)
+        if user_id is None:
+            LOG.debug("Neither X_USER_ID nor X_USER found in request")
+            return webob.exc.HTTPUnauthorized()
+        # get the roles
+        roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')]
+        if 'X_TENANT_ID' in req.headers:
+            # This is the new header since Keystone went to ID/Name
+            project_id = req.headers['X_TENANT_ID']
+        else:
+            # This is for legacy compatibility
+            project_id = req.headers['X_TENANT']
+
+        # Get the auth token
+        auth_token = req.headers.get('X_AUTH_TOKEN',
+                                     req.headers.get('X_STORAGE_TOKEN'))
+
+        # Build a context, including the auth_token...
+        remote_address = req.remote_addr
+        if FLAGS.use_forwarded_for:
+            remote_address = req.headers.get('X-Forwarded-For',
+                                             remote_address)
+        ctx = context.RequestContext(user_id,
+                                     project_id,
+                                     roles=roles,
+                                     auth_token=auth_token,
+                                     remote_address=remote_address)
+
+        req.environ['cinder.context'] = ctx
+        return self.application
diff --git a/cinder/api/openstack/__init__.py b/cinder/api/openstack/__init__.py
new file mode 100644
index 00000000000..22ff5de047a
--- /dev/null
+++ b/cinder/api/openstack/__init__.py
@@ -0,0 +1,143 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+WSGI middleware for OpenStack API controllers.
+""" + +import routes +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder import log as logging +from cinder import wsgi as base_wsgi + + +LOG = logging.getLogger(__name__) + + +class FaultWrapper(base_wsgi.Middleware): + """Calls down the middleware stack, making exceptions into faults.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + try: + return req.get_response(self.application) + except Exception as ex: + LOG.exception(_("Caught error: %s"), unicode(ex)) + msg_dict = dict(url=req.url, status=500) + LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict) + exc = webob.exc.HTTPInternalServerError() + # NOTE(johannes): We leave the explanation empty here on + # purpose. It could possibly have sensitive information + # that should not be returned back to the user. See + # bugs 868360 and 874472 + return wsgi.Fault(exc) + + +class APIMapper(routes.Mapper): + def routematch(self, url=None, environ=None): + if url is "": + result = self._match("", environ) + return result[0], result[1] + return routes.Mapper.routematch(self, url, environ) + + +class ProjectMapper(APIMapper): + def resource(self, member_name, collection_name, **kwargs): + if not ('parent_resource' in kwargs): + kwargs['path_prefix'] = '{project_id}/' + else: + parent_resource = kwargs['parent_resource'] + p_collection = parent_resource['collection_name'] + p_member = parent_resource['member_name'] + kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection, + p_member) + routes.Mapper.resource(self, member_name, + collection_name, + **kwargs) + + +class APIRouter(base_wsgi.Router): + """ + Routes requests on the OpenStack API to the appropriate controller + and method. + """ + ExtensionManager = None # override in subclasses + + @classmethod + def factory(cls, global_config, **local_config): + """Simple paste factory, :class:`cinder.wsgi.Router` doesn't have""" + return cls() + + def __init__(self, ext_mgr=None): + if ext_mgr is None: + if self.ExtensionManager: + ext_mgr = self.ExtensionManager() + else: + raise Exception(_("Must specify an ExtensionManager class")) + + mapper = ProjectMapper() + self.resources = {} + self._setup_routes(mapper) + self._setup_ext_routes(mapper, ext_mgr) + self._setup_extensions(ext_mgr) + super(APIRouter, self).__init__(mapper) + + def _setup_ext_routes(self, mapper, ext_mgr): + for resource in ext_mgr.get_resources(): + LOG.debug(_('Extended resource: %s'), + resource.collection) + + wsgi_resource = wsgi.Resource(resource.controller) + self.resources[resource.collection] = wsgi_resource + kargs = dict( + controller=wsgi_resource, + collection=resource.collection_actions, + member=resource.member_actions) + + if resource.parent: + kargs['parent_resource'] = resource.parent + + mapper.resource(resource.collection, resource.collection, **kargs) + + if resource.custom_routes_fn: + resource.custom_routes_fn(mapper, wsgi_resource) + + def _setup_extensions(self, ext_mgr): + for extension in ext_mgr.get_controller_extensions(): + ext_name = extension.extension.name + collection = extension.collection + controller = extension.controller + + if collection not in self.resources: + LOG.warning(_('Extension %(ext_name)s: Cannot extend ' + 'resource %(collection)s: No such resource') % + locals()) + continue + + LOG.debug(_('Extension %(ext_name)s extending resource: ' + '%(collection)s') % locals()) + + resource = self.resources[collection] + resource.register_actions(controller) + 
resource.register_extensions(controller) + + def _setup_routes(self, mapper): + raise NotImplementedError diff --git a/cinder/api/openstack/auth.py b/cinder/api/openstack/auth.py new file mode 100644 index 00000000000..cbc20843266 --- /dev/null +++ b/cinder/api/openstack/auth.py @@ -0,0 +1,65 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import webob.dec +import webob.exc + +from cinder.api.openstack import wsgi +from cinder import context +from cinder import flags +from cinder import log as logging +from cinder import wsgi as base_wsgi + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS +flags.DECLARE('use_forwarded_for', 'cinder.api.auth') + + +class NoAuthMiddleware(base_wsgi.Middleware): + """Return a fake token if one isn't specified.""" + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + if 'X-Auth-Token' not in req.headers: + user_id = req.headers.get('X-Auth-User', 'admin') + project_id = req.headers.get('X-Auth-Project-Id', 'admin') + os_url = os.path.join(req.url, project_id) + res = webob.Response() + # NOTE(vish): This is expecting and returning Auth(1.1), whereas + # keystone uses 2.0 auth. We should probably allow + # 2.0 auth here as well. + res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) + res.headers['X-Server-Management-Url'] = os_url + res.content_type = 'text/plain' + res.status = '204' + return res + + token = req.headers['X-Auth-Token'] + user_id, _sep, project_id = token.partition(':') + project_id = project_id or user_id + remote_address = getattr(req, 'remote_address', '127.0.0.1') + if FLAGS.use_forwarded_for: + remote_address = req.headers.get('X-Forwarded-For', remote_address) + ctx = context.RequestContext(user_id, + project_id, + is_admin=True, + remote_address=remote_address) + + req.environ['cinder.context'] = ctx + return self.application diff --git a/cinder/api/openstack/common.py b/cinder/api/openstack/common.py new file mode 100644 index 00000000000..ce2d2bd934d --- /dev/null +++ b/cinder/api/openstack/common.py @@ -0,0 +1,380 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import functools +import os +import re +import urlparse + +import webob +from xml.dom import minidom + +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import flags +from cinder import log as logging +from cinder import quota + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +XML_NS_V1 = 'http://docs.openstack.org/volume/api/v1' + + +def get_pagination_params(request): + """Return marker, limit tuple from request. + + :param request: `wsgi.Request` possibly containing 'marker' and 'limit' + GET variables. 'marker' is the id of the last element + the client has seen, and 'limit' is the maximum number + of items to return. If 'limit' is not specified, 0, or + > max_limit, we default to max_limit. Negative values + for either marker or limit will cause + exc.HTTPBadRequest() exceptions to be raised. + + """ + params = {} + if 'limit' in request.GET: + params['limit'] = _get_limit_param(request) + if 'marker' in request.GET: + params['marker'] = _get_marker_param(request) + return params + + +def _get_limit_param(request): + """Extract integer limit from request or fail""" + try: + limit = int(request.GET['limit']) + except ValueError: + msg = _('limit param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + if limit < 0: + msg = _('limit param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + return limit + + +def _get_marker_param(request): + """Extract marker id from request or fail""" + return request.GET['marker'] + + +def limited(items, request, max_limit=FLAGS.osapi_max_limit): + """Return a slice of items according to requested offset and limit. + + :param items: A sliceable entity + :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' + GET variables. 'offset' is where to start in the list, + and 'limit' is the maximum number of items to return. If + 'limit' is not specified, 0, or > max_limit, we default + to max_limit. Negative values for either offset or limit + will cause exc.HTTPBadRequest() exceptions to be raised. 
+ :kwarg max_limit: The maximum number of items to return from 'items' + """ + try: + offset = int(request.GET.get('offset', 0)) + except ValueError: + msg = _('offset param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + limit = int(request.GET.get('limit', max_limit)) + except ValueError: + msg = _('limit param must be an integer') + raise webob.exc.HTTPBadRequest(explanation=msg) + + if limit < 0: + msg = _('limit param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + + if offset < 0: + msg = _('offset param must be positive') + raise webob.exc.HTTPBadRequest(explanation=msg) + + limit = min(max_limit, limit or max_limit) + range_end = offset + limit + return items[offset:range_end] + + +def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): + """Return a slice of items according to the requested marker and limit.""" + params = get_pagination_params(request) + + limit = params.get('limit', max_limit) + marker = params.get('marker') + + limit = min(max_limit, limit) + start_index = 0 + if marker: + start_index = -1 + for i, item in enumerate(items): + if 'flavorid' in item: + if item['flavorid'] == marker: + start_index = i + 1 + break + elif item['id'] == marker or item.get('uuid') == marker: + start_index = i + 1 + break + if start_index < 0: + msg = _('marker [%s] not found') % marker + raise webob.exc.HTTPBadRequest(explanation=msg) + range_end = start_index + limit + return items[start_index:range_end] + + +def get_id_from_href(href): + """Return the id or uuid portion of a url. + + Given: 'http://www.foo.com/bar/123?q=4' + Returns: '123' + + Given: 'http://www.foo.com/bar/abc123?q=4' + Returns: 'abc123' + + """ + return urlparse.urlsplit("%s" % href).path.split('/')[-1] + + +def remove_version_from_href(href): + """Removes the first api version from the href. + + Given: 'http://www.cinder.com/v1.1/123' + Returns: 'http://www.cinder.com/123' + + Given: 'http://www.cinder.com/v1.1' + Returns: 'http://www.cinder.com' + + """ + parsed_url = urlparse.urlsplit(href) + url_parts = parsed_url.path.split('/', 2) + + # NOTE: this should match vX.X or vX + expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') + if expression.match(url_parts[1]): + del url_parts[1] + + new_path = '/'.join(url_parts) + + if new_path == parsed_url.path: + msg = _('href %s does not contain version') % href + LOG.debug(msg) + raise ValueError(msg) + + parsed_url = list(parsed_url) + parsed_url[2] = new_path + return urlparse.urlunsplit(parsed_url) + + +def get_version_from_href(href): + """Returns the api version in the href. + + Returns the api version in the href. + If no version is found, '2' is returned + + Given: 'http://www.cinder.com/123' + Returns: '2' + + Given: 'http://www.cinder.com/v1.1' + Returns: '1.1' + + """ + try: + expression = r'/v([0-9]+|[0-9]+\.[0-9]+)(/|$)' + return re.findall(expression, href)[0][0] + except IndexError: + return '2' + + +def dict_to_query_str(params): + # TODO(throughnothing): we should just use urllib.urlencode instead of this + # But currently we don't work with urlencoded url's + param_str = "" + for key, val in params.iteritems(): + param_str = param_str + '='.join([str(key), str(val)]) + '&' + + return param_str.rstrip('&') + + +def raise_http_conflict_for_instance_invalid_state(exc, action): + """Return a webob.exc.HTTPConflict instance containing a message + appropriate to return via the API based on the original + InstanceInvalidState exception. 
+ """ + attr = exc.kwargs.get('attr') + state = exc.kwargs.get('state') + if attr and state: + msg = _("Cannot '%(action)s' while instance is in %(attr)s %(state)s") + else: + # At least give some meaningful message + msg = _("Instance is in an invalid state for '%(action)s'") + raise webob.exc.HTTPConflict(explanation=msg % locals()) + + +class MetadataDeserializer(wsgi.MetadataXMLDeserializer): + def deserialize(self, text): + dom = minidom.parseString(text) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + +class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): + def deserialize(self, text): + dom = minidom.parseString(text) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +class MetadataXMLDeserializer(wsgi.XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + if metadata_node is None: + return {} + metadata = {} + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + def _extract_metadata_container(self, datastring): + dom = minidom.parseString(datastring) + metadata_node = self.find_first_child_named(dom, "metadata") + metadata = self.extract_metadata(metadata_node) + return {'body': {'metadata': metadata}} + + def create(self, datastring): + return self._extract_metadata_container(datastring) + + def update_all(self, datastring): + return self._extract_metadata_container(datastring) + + def update(self, datastring): + dom = minidom.parseString(datastring) + metadata_item = self.extract_metadata(dom) + return {'body': {'meta': metadata_item}} + + +metadata_nsmap = {None: xmlutil.XMLNS_V11} + + +class MetaItemTemplate(xmlutil.TemplateBuilder): + def construct(self): + sel = xmlutil.Selector('meta', xmlutil.get_items, 0) + root = xmlutil.TemplateElement('meta', selector=sel) + root.set('key', 0) + root.text = 1 + return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) + + +class MetadataTemplateElement(xmlutil.TemplateElement): + def will_render(self, datum): + return True + + +class MetadataTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = MetadataTemplateElement('metadata', selector='metadata') + elem = xmlutil.SubTemplateElement(root, 'meta', + selector=xmlutil.get_items) + elem.set('key', 0) + elem.text = 1 + return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap) + + +def check_snapshots_enabled(f): + @functools.wraps(f) + def inner(*args, **kwargs): + if not FLAGS.allow_instance_snapshots: + LOG.warn(_('Rejecting snapshot request, snapshots currently' + ' disabled')) + msg = _("Instance snapshots are not permitted at this time.") + raise webob.exc.HTTPBadRequest(explanation=msg) + return f(*args, **kwargs) + return inner + + +class ViewBuilder(object): + """Model API responses as dictionaries.""" + + _collection_name = None + + def _get_links(self, request, identifier): + return [{ + "rel": "self", + "href": self._get_href_link(request, identifier), + }, + { + "rel": "bookmark", + "href": self._get_bookmark_link(request, identifier), + }] + + def _get_next_link(self, request, identifier): + """Return href string with proper limit and marker params.""" + params = request.params.copy() + params["marker"] = identifier + prefix = self._update_link_prefix(request.application_url, + FLAGS.osapi_compute_link_prefix) + 
url = os.path.join(prefix, + request.environ["cinder.context"].project_id, + self._collection_name) + return "%s?%s" % (url, dict_to_query_str(params)) + + def _get_href_link(self, request, identifier): + """Return an href string pointing to this object.""" + prefix = self._update_link_prefix(request.application_url, + FLAGS.osapi_compute_link_prefix) + return os.path.join(prefix, + request.environ["cinder.context"].project_id, + self._collection_name, + str(identifier)) + + def _get_bookmark_link(self, request, identifier): + """Create a URL that refers to a specific resource.""" + base_url = remove_version_from_href(request.application_url) + base_url = self._update_link_prefix(base_url, + FLAGS.osapi_compute_link_prefix) + return os.path.join(base_url, + request.environ["cinder.context"].project_id, + self._collection_name, + str(identifier)) + + def _get_collection_links(self, request, items, id_key="uuid"): + """Retrieve 'next' link, if applicable.""" + links = [] + limit = int(request.params.get("limit", 0)) + if limit and limit == len(items): + last_item = items[-1] + if id_key in last_item: + last_item_id = last_item[id_key] + else: + last_item_id = last_item["id"] + links.append({ + "rel": "next", + "href": self._get_next_link(request, last_item_id), + }) + return links + + def _update_link_prefix(self, orig_url, prefix): + if not prefix: + return orig_url + url_parts = list(urlparse.urlsplit(orig_url)) + prefix_parts = list(urlparse.urlsplit(prefix)) + url_parts[0:2] = prefix_parts[0:2] + return urlparse.urlunsplit(url_parts) diff --git a/cinder/api/openstack/compute/__init__.py b/cinder/api/openstack/compute/__init__.py new file mode 100644 index 00000000000..7372b0c97ef --- /dev/null +++ b/cinder/api/openstack/compute/__init__.py @@ -0,0 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack Compute API. 
+""" + +from cinder.api.openstack.compute import versions diff --git a/cinder/api/openstack/compute/schemas/atom-link.rng b/cinder/api/openstack/compute/schemas/atom-link.rng new file mode 100644 index 00000000000..edba5eee6c4 --- /dev/null +++ b/cinder/api/openstack/compute/schemas/atom-link.rng @@ -0,0 +1,141 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + [^:]* + + + + + + .+/.+ + + + + + + [A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})* + + + + + + + + + + + + xml:base + xml:lang + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cinder/api/openstack/compute/schemas/v1.1/extension.rng b/cinder/api/openstack/compute/schemas/v1.1/extension.rng new file mode 100644 index 00000000000..b16d8c13006 --- /dev/null +++ b/cinder/api/openstack/compute/schemas/v1.1/extension.rng @@ -0,0 +1,11 @@ + + + + + + + + + + diff --git a/cinder/api/openstack/compute/schemas/v1.1/extensions.rng b/cinder/api/openstack/compute/schemas/v1.1/extensions.rng new file mode 100644 index 00000000000..8538eaf2dad --- /dev/null +++ b/cinder/api/openstack/compute/schemas/v1.1/extensions.rng @@ -0,0 +1,6 @@ + + + + + diff --git a/cinder/api/openstack/compute/schemas/v1.1/metadata.rng b/cinder/api/openstack/compute/schemas/v1.1/metadata.rng new file mode 100644 index 00000000000..b2f5d702a2f --- /dev/null +++ b/cinder/api/openstack/compute/schemas/v1.1/metadata.rng @@ -0,0 +1,9 @@ + + + + + + + + diff --git a/cinder/api/openstack/compute/versions.py b/cinder/api/openstack/compute/versions.py new file mode 100644 index 00000000000..d5108c0cfac --- /dev/null +++ b/cinder/api/openstack/compute/versions.py @@ -0,0 +1,244 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import datetime + +from lxml import etree + +from cinder.api.openstack.compute.views import versions as views_versions +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil + + +LINKS = { + 'v2.0': { + 'pdf': 'http://docs.openstack.org/' + 'api/openstack-compute/1.1/os-compute-devguide-1.1.pdf', + 'wadl': 'http://docs.openstack.org/' + 'api/openstack-compute/1.1/wadl/os-compute-1.1.wadl', + }, +} + + +VERSIONS = { + "v2.0": { + "id": "v2.0", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": LINKS['v2.0']['pdf'], + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": LINKS['v2.0']['wadl'], + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute+xml;version=2", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute+json;version=2", + } + ], + } +} + + +class MediaTypesTemplateElement(xmlutil.TemplateElement): + def will_render(self, datum): + return 'media-types' in datum + + +def make_version(elem): + elem.set('id') + elem.set('status') + elem.set('updated') + + mts = MediaTypesTemplateElement('media-types') + elem.append(mts) + + mt = xmlutil.SubTemplateElement(mts, 'media-type', selector='media-types') + mt.set('base') + mt.set('type') + + xmlutil.make_links(elem, 'links') + + +version_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class VersionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('version', selector='version') + make_version(root) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class VersionsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('versions') + elem = xmlutil.SubTemplateElement(root, 'version', selector='versions') + make_version(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class ChoicesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('choices') + elem = xmlutil.SubTemplateElement(root, 'version', selector='choices') + make_version(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=version_nsmap) + + +class AtomSerializer(wsgi.XMLDictSerializer): + + NSMAP = {None: xmlutil.XMLNS_ATOM} + + def __init__(self, metadata=None, xmlns=None): + self.metadata = metadata or {} + if not xmlns: + self.xmlns = wsgi.XMLNS_ATOM + else: + self.xmlns = xmlns + + def _get_most_recent_update(self, versions): + recent = None + for version in versions: + updated = datetime.datetime.strptime(version['updated'], + '%Y-%m-%dT%H:%M:%SZ') + if not recent: + recent = updated + elif updated > recent: + recent = updated + + return recent.strftime('%Y-%m-%dT%H:%M:%SZ') + + def _get_base_url(self, link_href): + # Make sure no trailing / + link_href = link_href.rstrip('/') + return link_href.rsplit('/', 1)[0] + '/' + + def _create_feed(self, versions, feed_title, feed_id): + feed = etree.Element('feed', nsmap=self.NSMAP) + title = etree.SubElement(feed, 'title') + title.set('type', 'text') + title.text = feed_title + + # Set this updated to the most recently updated version + recent = self._get_most_recent_update(versions) + etree.SubElement(feed, 'updated').text = recent + + etree.SubElement(feed, 'id').text = feed_id + + link = etree.SubElement(feed, 'link') + link.set('rel', 'self') + link.set('href', feed_id) + + author = etree.SubElement(feed, 'author') + 
etree.SubElement(author, 'name').text = 'Rackspace' + etree.SubElement(author, 'uri').text = 'http://www.rackspace.com/' + + for version in versions: + feed.append(self._create_version_entry(version)) + + return feed + + def _create_version_entry(self, version): + entry = etree.Element('entry') + etree.SubElement(entry, 'id').text = version['links'][0]['href'] + title = etree.SubElement(entry, 'title') + title.set('type', 'text') + title.text = 'Version %s' % version['id'] + etree.SubElement(entry, 'updated').text = version['updated'] + + for link in version['links']: + link_elem = etree.SubElement(entry, 'link') + link_elem.set('rel', link['rel']) + link_elem.set('href', link['href']) + if 'type' in link: + link_elem.set('type', link['type']) + + content = etree.SubElement(entry, 'content') + content.set('type', 'text') + content.text = 'Version %s %s (%s)' % (version['id'], + version['status'], + version['updated']) + return entry + + +class VersionsAtomSerializer(AtomSerializer): + def default(self, data): + versions = data['versions'] + feed_id = self._get_base_url(versions[0]['links'][0]['href']) + feed = self._create_feed(versions, 'Available API Versions', feed_id) + return self._to_xml(feed) + + +class VersionAtomSerializer(AtomSerializer): + def default(self, data): + version = data['version'] + feed_id = version['links'][0]['href'] + feed = self._create_feed([version], 'About This Version', feed_id) + return self._to_xml(feed) + + +class Versions(wsgi.Resource): + def __init__(self): + super(Versions, self).__init__(None) + + @wsgi.serializers(xml=VersionsTemplate, + atom=VersionsAtomSerializer) + def index(self, req): + """Return all versions.""" + builder = views_versions.get_view_builder(req) + return builder.build_versions(VERSIONS) + + @wsgi.serializers(xml=ChoicesTemplate) + @wsgi.response(300) + def multi(self, req): + """Return multiple choices.""" + builder = views_versions.get_view_builder(req) + return builder.build_choices(VERSIONS, req) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + args = {} + if request_environment['PATH_INFO'] == '/': + args['action'] = 'index' + else: + args['action'] = 'multi' + + return args + + +class VersionV2(object): + @wsgi.serializers(xml=VersionTemplate, + atom=VersionAtomSerializer) + def show(self, req): + builder = views_versions.get_view_builder(req) + return builder.build_version(VERSIONS['v2.0']) + + +def create_resource(): + return wsgi.Resource(VersionV2()) diff --git a/cinder/api/openstack/compute/views/__init__.py b/cinder/api/openstack/compute/views/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/api/openstack/compute/views/versions.py b/cinder/api/openstack/compute/views/versions.py new file mode 100644 index 00000000000..cb2fd9f4ad7 --- /dev/null +++ b/cinder/api/openstack/compute/views/versions.py @@ -0,0 +1,94 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import os + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(object): + + def __init__(self, base_url): + """ + :param base_url: url of the root wsgi application + """ + self.base_url = base_url + + def build_choices(self, VERSIONS, req): + version_objs = [] + for version in VERSIONS: + version = VERSIONS[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "links": [ + { + "rel": "self", + "href": self.generate_href(req.path), + }, + ], + "media-types": version['media-types'], + }) + + return dict(choices=version_objs) + + def build_versions(self, versions): + version_objs = [] + for version in sorted(versions.keys()): + version = versions[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "updated": version['updated'], + "links": self._build_links(version), + }) + + return dict(versions=version_objs) + + def build_version(self, version): + reval = copy.deepcopy(version) + reval['links'].insert(0, { + "rel": "self", + "href": self.base_url.rstrip('/') + '/', + }) + return dict(version=reval) + + def _build_links(self, version_data): + """Generate a container of links that refer to the provided version.""" + href = self.generate_href() + + links = [ + { + "rel": "self", + "href": href, + }, + ] + + return links + + def generate_href(self, path=None): + """Create an url that refers to a specific version_number.""" + version_number = 'v2' + if path: + path = path.strip('/') + return os.path.join(self.base_url, version_number, path) + else: + return os.path.join(self.base_url, version_number) + '/' diff --git a/cinder/api/openstack/extensions.py b/cinder/api/openstack/extensions.py new file mode 100644 index 00000000000..baa9510b653 --- /dev/null +++ b/cinder/api/openstack/extensions.py @@ -0,0 +1,395 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +import webob.dec +import webob.exc + +import cinder.api.openstack +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import exception as common_exception +from cinder.openstack.common import importutils +import cinder.policy + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +class ExtensionDescriptor(object): + """Base class that defines the contract for extensions. + + Note that you don't have to derive from this class to have a valid + extension; it is purely a convenience. 
+ + """ + + # The name of the extension, e.g., 'Fox In Socks' + name = None + + # The alias for the extension, e.g., 'FOXNSOX' + alias = None + + # Description comes from the docstring for the class + + # The XML namespace for the extension, e.g., + # 'http://www.fox.in.socks/api/ext/pie/v1.0' + namespace = None + + # The timestamp when the extension was last updated, e.g., + # '2011-01-22T13:25:27-06:00' + updated = None + + def __init__(self, ext_mgr): + """Register extension with the extension manager.""" + + ext_mgr.register(self) + + def get_resources(self): + """List of extensions.ResourceExtension extension objects. + + Resources define new nouns, and are accessible through URLs. + + """ + resources = [] + return resources + + def get_controller_extensions(self): + """List of extensions.ControllerExtension extension objects. + + Controller extensions are used to extend existing controllers. + """ + controller_exts = [] + return controller_exts + + @classmethod + def nsmap(cls): + """Synthesize a namespace map from extension.""" + + # Start with a base nsmap + nsmap = ext_nsmap.copy() + + # Add the namespace for the extension + nsmap[cls.alias] = cls.namespace + + return nsmap + + @classmethod + def xmlname(cls, name): + """Synthesize element and attribute names.""" + + return '{%s}%s' % (cls.namespace, name) + + +def make_ext(elem): + elem.set('name') + elem.set('namespace') + elem.set('alias') + elem.set('updated') + + desc = xmlutil.SubTemplateElement(elem, 'description') + desc.text = 'description' + + xmlutil.make_links(elem, 'links') + + +ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM} + + +class ExtensionTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('extension', selector='extension') + make_ext(root) + return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap) + + +class ExtensionsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('extensions') + elem = xmlutil.SubTemplateElement(root, 'extension', + selector='extensions') + make_ext(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap) + + +class ExtensionsResource(wsgi.Resource): + + def __init__(self, extension_manager): + self.extension_manager = extension_manager + super(ExtensionsResource, self).__init__(None) + + def _translate(self, ext): + ext_data = {} + ext_data['name'] = ext.name + ext_data['alias'] = ext.alias + ext_data['description'] = ext.__doc__ + ext_data['namespace'] = ext.namespace + ext_data['updated'] = ext.updated + ext_data['links'] = [] # TODO(dprince): implement extension links + return ext_data + + @wsgi.serializers(xml=ExtensionsTemplate) + def index(self, req): + extensions = [] + for _alias, ext in self.extension_manager.extensions.iteritems(): + extensions.append(self._translate(ext)) + return dict(extensions=extensions) + + @wsgi.serializers(xml=ExtensionTemplate) + def show(self, req, id): + try: + # NOTE(dprince): the extensions alias is used as the 'id' for show + ext = self.extension_manager.extensions[id] + except KeyError: + raise webob.exc.HTTPNotFound() + + return dict(extension=self._translate(ext)) + + def delete(self, req, id): + raise webob.exc.HTTPNotFound() + + def create(self, req): + raise webob.exc.HTTPNotFound() + + +class ExtensionManager(object): + """Load extensions from the configured extension path. + + See cinder/tests/api/openstack/extensions/foxinsocks/extension.py for an + example extension implementation. 
+ + """ + + def register(self, ext): + # Do nothing if the extension doesn't check out + if not self._check_extension(ext): + return + + alias = ext.alias + LOG.audit(_('Loaded extension: %s'), alias) + + if alias in self.extensions: + raise exception.Error("Found duplicate extension: %s" % alias) + self.extensions[alias] = ext + + def get_resources(self): + """Returns a list of ResourceExtension objects.""" + + resources = [] + resources.append(ResourceExtension('extensions', + ExtensionsResource(self))) + + for ext in self.extensions.values(): + try: + resources.extend(ext.get_resources()) + except AttributeError: + # NOTE(dprince): Extension aren't required to have resource + # extensions + pass + return resources + + def get_controller_extensions(self): + """Returns a list of ControllerExtension objects.""" + controller_exts = [] + for ext in self.extensions.values(): + try: + controller_exts.extend(ext.get_controller_extensions()) + except AttributeError: + # NOTE(Vek): Extensions aren't required to have + # controller extensions + pass + return controller_exts + + def _check_extension(self, extension): + """Checks for required methods in extension objects.""" + try: + LOG.debug(_('Ext name: %s'), extension.name) + LOG.debug(_('Ext alias: %s'), extension.alias) + LOG.debug(_('Ext description: %s'), + ' '.join(extension.__doc__.strip().split())) + LOG.debug(_('Ext namespace: %s'), extension.namespace) + LOG.debug(_('Ext updated: %s'), extension.updated) + except AttributeError as ex: + LOG.exception(_("Exception loading extension: %s"), unicode(ex)) + return False + + return True + + def load_extension(self, ext_factory): + """Execute an extension factory. + + Loads an extension. The 'ext_factory' is the name of a + callable that will be imported and called with one + argument--the extension manager. The factory callable is + expected to call the register() method at least once. + """ + + LOG.debug(_("Loading extension %s"), ext_factory) + + # Load the factory + factory = importutils.import_class(ext_factory) + + # Call it + LOG.debug(_("Calling extension factory %s"), ext_factory) + factory(self) + + def _load_extensions(self): + """Load extensions specified on the command line.""" + + extensions = list(self.cls_list) + + for ext_factory in extensions: + try: + self.load_extension(ext_factory) + except Exception as exc: + LOG.warn(_('Failed to load extension %(ext_factory)s: ' + '%(exc)s') % locals()) + + +class ControllerExtension(object): + """Extend core controllers of cinder OpenStack API. + + Provide a way to extend existing cinder OpenStack API core + controllers. 
+ """ + + def __init__(self, extension, collection, controller): + self.extension = extension + self.collection = collection + self.controller = controller + + +class ResourceExtension(object): + """Add top level resources to the OpenStack API in cinder.""" + + def __init__(self, collection, controller, parent=None, + collection_actions=None, member_actions=None, + custom_routes_fn=None): + if not collection_actions: + collection_actions = {} + if not member_actions: + member_actions = {} + self.collection = collection + self.controller = controller + self.parent = parent + self.collection_actions = collection_actions + self.member_actions = member_actions + self.custom_routes_fn = custom_routes_fn + + +def wrap_errors(fn): + """Ensure errors are not passed along.""" + def wrapped(*args, **kwargs): + try: + return fn(*args, **kwargs) + except webob.exc.HTTPException: + raise + except Exception: + raise webob.exc.HTTPInternalServerError() + return wrapped + + +def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): + """Registers all standard API extensions.""" + + # Walk through all the modules in our directory... + our_dir = path[0] + for dirpath, dirnames, filenames in os.walk(our_dir): + # Compute the relative package name from the dirpath + relpath = os.path.relpath(dirpath, our_dir) + if relpath == '.': + relpkg = '' + else: + relpkg = '.%s' % '.'.join(relpath.split(os.sep)) + + # Now, consider each file in turn, only considering .py files + for fname in filenames: + root, ext = os.path.splitext(fname) + + # Skip __init__ and anything that's not .py + if ext != '.py' or root == '__init__': + continue + + # Try loading it + classname = "%s%s" % (root[0].upper(), root[1:]) + classpath = ("%s%s.%s.%s" % + (package, relpkg, root, classname)) + + if ext_list is not None and classname not in ext_list: + logger.debug("Skipping extension: %s" % classpath) + continue + + try: + ext_mgr.load_extension(classpath) + except Exception as exc: + logger.warn(_('Failed to load extension %(classpath)s: ' + '%(exc)s') % locals()) + + # Now, let's consider any subdirectories we may have... + subdirs = [] + for dname in dirnames: + # Skip it if it does not have __init__.py + if not os.path.exists(os.path.join(dirpath, dname, + '__init__.py')): + continue + + # If it has extension(), delegate... + ext_name = ("%s%s.%s.extension" % + (package, relpkg, dname)) + try: + ext = importutils.import_class(ext_name) + except common_exception.NotFound: + # extension() doesn't exist on it, so we'll explore + # the directory for ourselves + subdirs.append(dname) + else: + try: + ext(ext_mgr) + except Exception as exc: + logger.warn(_('Failed to load extension %(ext_name)s: ' + '%(exc)s') % locals()) + + # Update the list of directories we'll explore... 
+ dirnames[:] = subdirs + + +def extension_authorizer(api_name, extension_name): + def authorize(context, target=None): + if target is None: + target = {'project_id': context.project_id, + 'user_id': context.user_id} + action = '%s_extension:%s' % (api_name, extension_name) + cinder.policy.enforce(context, action, target) + return authorize + + +def soft_extension_authorizer(api_name, extension_name): + hard_authorize = extension_authorizer(api_name, extension_name) + + def authorize(context): + try: + hard_authorize(context) + return True + except exception.NotAuthorized: + return False + return authorize diff --git a/cinder/api/openstack/urlmap.py b/cinder/api/openstack/urlmap.py new file mode 100644 index 00000000000..ac320985405 --- /dev/null +++ b/cinder/api/openstack/urlmap.py @@ -0,0 +1,297 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import paste.urlmap +import re +import urllib2 + +from cinder import log as logging +from cinder.api.openstack import wsgi + + +_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' +_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*' + r'(?:=\s*([^;]+|%s))?\s*' % + (_quoted_string_re, _quoted_string_re)) + +LOG = logging.getLogger(__name__) + + +def unquote_header_value(value): + """Unquotes a header value. + This does not use the real unquoting but what browsers are actually + using for quoting. + + :param value: the header value to unquote. + """ + if value and value[0] == value[-1] == '"': + # this is not the real unquoting, but fixing this so that the + # RFC is met will result in bugs with internet explorer and + # probably some other browsers as well. IE for example is + # uploading files with "C:\foo\bar.txt" as filename + value = value[1:-1] + return value + + +def parse_list_header(value): + """Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Quotes are removed automatically after parsing. + + The return value is a standard :class:`list`: + + >>> parse_list_header('token, "quoted value"') + ['token', 'quoted value'] + + :param value: a string with a list header. + :return: :class:`list` + """ + result = [] + for item in urllib2.parse_http_list(value): + if item[:1] == item[-1:] == '"': + item = unquote_header_value(item[1:-1]) + result.append(item) + return result + + +def parse_options_header(value): + """Parse a ``Content-Type`` like header into a tuple with the content + type and the options: + + >>> parse_options_header('Content-Type: text/html; mimetype=text/html') + ('Content-Type:', {'mimetype': 'text/html'}) + + :param value: the header to parse. 
+ :return: (str, options) + """ + def _tokenize(string): + for match in _option_header_piece_re.finditer(string): + key, value = match.groups() + key = unquote_header_value(key) + if value is not None: + value = unquote_header_value(value) + yield key, value + + if not value: + return '', {} + + parts = _tokenize(';' + value) + name = parts.next()[0] + extra = dict(parts) + return name, extra + + +class Accept(object): + def __init__(self, value): + self._content_types = [parse_options_header(v) for v in + parse_list_header(value)] + + def best_match(self, supported_content_types): + # FIXME: Should we have a more sophisticated matching algorithm that + # takes into account the version as well? + best_quality = -1 + best_content_type = None + best_params = {} + best_match = '*/*' + + for content_type in supported_content_types: + for content_mask, params in self._content_types: + try: + quality = float(params.get('q', 1)) + except ValueError: + continue + + if quality < best_quality: + continue + elif best_quality == quality: + if best_match.count('*') <= content_mask.count('*'): + continue + + if self._match_mask(content_mask, content_type): + best_quality = quality + best_content_type = content_type + best_params = params + best_match = content_mask + + return best_content_type, best_params + + def content_type_params(self, best_content_type): + """Find parameters in Accept header for given content type.""" + for content_type, params in self._content_types: + if best_content_type == content_type: + return params + + return {} + + def _match_mask(self, mask, content_type): + if '*' not in mask: + return content_type == mask + if mask == '*/*': + return True + mask_major = mask[:-2] + content_type_major = content_type.split('/', 1)[0] + return content_type_major == mask_major + + +def urlmap_factory(loader, global_conf, **local_conf): + if 'not_found_app' in local_conf: + not_found_app = local_conf.pop('not_found_app') + else: + not_found_app = global_conf.get('not_found_app') + if not_found_app: + not_found_app = loader.get_app(not_found_app, global_conf=global_conf) + urlmap = URLMap(not_found_app=not_found_app) + for path, app_name in local_conf.items(): + path = paste.urlmap.parse_path_expression(path) + app = loader.get_app(app_name, global_conf=global_conf) + urlmap[path] = app + return urlmap + + +class URLMap(paste.urlmap.URLMap): + def _match(self, host, port, path_info): + """Find longest match for a given URL path.""" + for (domain, app_url), app in self.applications: + if domain and domain != host and domain != host + ':' + port: + continue + if (path_info == app_url + or path_info.startswith(app_url + '/')): + return app, app_url + + return None, None + + def _set_script_name(self, app, app_url): + def wrap(environ, start_response): + environ['SCRIPT_NAME'] += app_url + return app(environ, start_response) + + return wrap + + def _munge_path(self, app, path_info, app_url): + def wrap(environ, start_response): + environ['SCRIPT_NAME'] += app_url + environ['PATH_INFO'] = path_info[len(app_url):] + return app(environ, start_response) + + return wrap + + def _path_strategy(self, host, port, path_info): + """Check path suffix for MIME type and path prefix for API version.""" + mime_type = app = app_url = None + + parts = path_info.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: + mime_type = possible_type + + parts = path_info.split('/') + if len(parts) > 1: + possible_app, possible_app_url = 
self._match(host, port, path_info) + # Don't use prefix if it ends up matching default + if possible_app and possible_app_url: + app_url = possible_app_url + app = self._munge_path(possible_app, path_info, app_url) + + return mime_type, app, app_url + + def _content_type_strategy(self, host, port, environ): + """Check Content-Type header for API version.""" + app = None + params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] + if 'version' in params: + app, app_url = self._match(host, port, '/v' + params['version']) + if app: + app = self._set_script_name(app, app_url) + + return app + + def _accept_strategy(self, host, port, environ, supported_content_types): + """Check Accept header for best matching MIME type and API version.""" + accept = Accept(environ.get('HTTP_ACCEPT', '')) + + app = None + + # Find the best match in the Accept header + mime_type, params = accept.best_match(supported_content_types) + if 'version' in params: + app, app_url = self._match(host, port, '/v' + params['version']) + if app: + app = self._set_script_name(app, app_url) + + return mime_type, app + + def __call__(self, environ, start_response): + host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() + if ':' in host: + host, port = host.split(':', 1) + else: + if environ['wsgi.url_scheme'] == 'http': + port = '80' + else: + port = '443' + + path_info = environ['PATH_INFO'] + path_info = self.normalize_url(path_info, False)[1] + + # The MIME type for the response is determined in one of two ways: + # 1) URL path suffix (eg /servers/detail.json) + # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2) + + # The API version is determined in one of three ways: + # 1) URL path prefix (eg /v1.1/tenant/servers/detail) + # 2) Content-Type header (eg application/json;version=1.1) + # 3) Accept header (eg application/json;q=0.8;version=1.1) + + supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) + + mime_type, app, app_url = self._path_strategy(host, port, path_info) + + # Accept application/atom+xml for the index query of each API + # version mount point as well as the root index + if (app_url and app_url + '/' == path_info) or path_info == '/': + supported_content_types.append('application/atom+xml') + + if not app: + app = self._content_type_strategy(host, port, environ) + + if not mime_type or not app: + possible_mime_type, possible_app = self._accept_strategy( + host, port, environ, supported_content_types) + if possible_mime_type and not mime_type: + mime_type = possible_mime_type + if possible_app and not app: + app = possible_app + + if not mime_type: + mime_type = 'application/json' + + if not app: + # Didn't match a particular version, probably matches default + app, app_url = self._match(host, port, path_info) + if app: + app = self._munge_path(app, path_info, app_url) + + if app: + environ['cinder.best_content_type'] = mime_type + return app(environ, start_response) + + environ['paste.urlmap_object'] = self + return self.not_found_application(environ, start_response) diff --git a/cinder/api/openstack/volume/__init__.py b/cinder/api/openstack/volume/__init__.py new file mode 100644 index 00000000000..2d9ac302bd8 --- /dev/null +++ b/cinder/api/openstack/volume/__init__.py @@ -0,0 +1,62 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack Volume API. +""" + +import cinder.api.openstack +from cinder.api.openstack.volume import extensions +from cinder.api.openstack.volume import snapshots +from cinder.api.openstack.volume import types +from cinder.api.openstack.volume import volumes +from cinder.api.openstack.volume import versions +from cinder import log as logging + + +LOG = logging.getLogger(__name__) + + +class APIRouter(cinder.api.openstack.APIRouter): + """ + Routes requests on the OpenStack API to the appropriate controller + and method. + """ + ExtensionManager = extensions.ExtensionManager + + def _setup_routes(self, mapper): + self.resources['versions'] = versions.create_resource() + mapper.connect("versions", "/", + controller=self.resources['versions'], + action='show') + + mapper.redirect("", "/") + + self.resources['volumes'] = volumes.create_resource() + mapper.resource("volume", "volumes", + controller=self.resources['volumes'], + collection={'detail': 'GET'}) + + self.resources['types'] = types.create_resource() + mapper.resource("type", "types", + controller=self.resources['types']) + + self.resources['snapshots'] = snapshots.create_resource() + mapper.resource("snapshot", "snapshots", + controller=self.resources['snapshots'], + collection={'detail': 'GET'}) diff --git a/cinder/api/openstack/volume/contrib/__init__.py b/cinder/api/openstack/volume/contrib/__init__.py new file mode 100644 index 00000000000..c49a4c6d3ad --- /dev/null +++ b/cinder/api/openstack/volume/contrib/__init__.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Contrib contains extensions that are shipped with cinder. + +It can't be called 'extensions' because that causes namespacing problems. 
+ +""" + +from cinder import flags +from cinder import log as logging +from cinder.api.openstack import extensions + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +def standard_extensions(ext_mgr): + extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__) + + +def select_extensions(ext_mgr): + extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, + FLAGS.osapi_volume_ext_list) diff --git a/cinder/api/openstack/volume/contrib/types_extra_specs.py b/cinder/api/openstack/volume/contrib/types_extra_specs.py new file mode 100644 index 00000000000..e0c4d595cdf --- /dev/null +++ b/cinder/api/openstack/volume/contrib/types_extra_specs.py @@ -0,0 +1,152 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volume types extra specs extension""" + +import webob + +from cinder.api.openstack import extensions +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import db +from cinder import exception +from cinder.volume import volume_types + + +authorize = extensions.extension_authorizer('volume', 'types_extra_specs') + + +class VolumeTypeExtraSpecsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypeExtraSpecTemplate(xmlutil.TemplateBuilder): + def construct(self): + tagname = xmlutil.Selector('key') + + def extraspec_sel(obj, do_raise=False): + # Have to extract the key and value for later use... 
+ key, value = obj.items()[0] + return dict(key=key, value=value) + + root = xmlutil.TemplateElement(tagname, selector=extraspec_sel) + root.text = 'value' + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypeExtraSpecsController(object): + """ The volume type extra specs API controller for the OpenStack API """ + + def _get_extra_specs(self, context, type_id): + extra_specs = db.volume_type_extra_specs_get(context, type_id) + specs_dict = {} + for key, value in extra_specs.iteritems(): + specs_dict[key] = value + return dict(extra_specs=specs_dict) + + def _check_body(self, body): + if not body: + expl = _('No Request Body') + raise webob.exc.HTTPBadRequest(explanation=expl) + + def _check_type(self, context, type_id): + try: + volume_types.get_volume_type(context, type_id) + except exception.NotFound as ex: + raise webob.exc.HTTPNotFound(explanation=unicode(ex)) + + @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate) + def index(self, req, type_id): + """ Returns the list of extra specs for a given volume type """ + context = req.environ['cinder.context'] + authorize(context) + self._check_type(context, type_id) + return self._get_extra_specs(context, type_id) + + @wsgi.serializers(xml=VolumeTypeExtraSpecsTemplate) + def create(self, req, type_id, body=None): + context = req.environ['cinder.context'] + authorize(context) + self._check_type(context, type_id) + self._check_body(body) + specs = body.get('extra_specs') + if not isinstance(specs, dict): + expl = _('Malformed extra specs') + raise webob.exc.HTTPBadRequest(explanation=expl) + db.volume_type_extra_specs_update_or_create(context, + type_id, + specs) + return body + + @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate) + def update(self, req, type_id, id, body=None): + context = req.environ['cinder.context'] + authorize(context) + self._check_type(context, type_id) + self._check_body(body) + if not id in body: + expl = _('Request body and URI mismatch') + raise webob.exc.HTTPBadRequest(explanation=expl) + if len(body) > 1: + expl = _('Request body contains too many items') + raise webob.exc.HTTPBadRequest(explanation=expl) + db.volume_type_extra_specs_update_or_create(context, + type_id, + body) + return body + + @wsgi.serializers(xml=VolumeTypeExtraSpecTemplate) + def show(self, req, type_id, id): + """Return a single extra spec item.""" + context = req.environ['cinder.context'] + authorize(context) + self._check_type(context, type_id) + specs = self._get_extra_specs(context, type_id) + if id in specs['extra_specs']: + return {id: specs['extra_specs'][id]} + else: + raise webob.exc.HTTPNotFound() + + def delete(self, req, type_id, id): + """ Deletes an existing extra spec """ + context = req.environ['cinder.context'] + self._check_type(context, type_id) + authorize(context) + db.volume_type_extra_specs_delete(context, type_id, id) + return webob.Response(status_int=202) + + +class Types_extra_specs(extensions.ExtensionDescriptor): + """Types extra specs support""" + + name = "TypesExtraSpecs" + alias = "os-types-extra-specs" + namespace = "http://docs.openstack.org/volume/ext/types-extra-specs/api/v1" + updated = "2011-08-24T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension('extra_specs', + VolumeTypeExtraSpecsController(), + parent=dict( + member_name='type', + collection_name='types')) + resources.append(res) + + return resources diff --git a/cinder/api/openstack/volume/contrib/types_manage.py b/cinder/api/openstack/volume/contrib/types_manage.py new file mode 100644 
index 00000000000..bb8921a0f3e --- /dev/null +++ b/cinder/api/openstack/volume/contrib/types_manage.py @@ -0,0 +1,91 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volume types manage extension.""" + +import webob + +from cinder.api.openstack import extensions +from cinder.api.openstack.volume import types +from cinder.api.openstack import wsgi +from cinder import exception +from cinder.volume import volume_types + + +authorize = extensions.extension_authorizer('volume', 'types_manage') + + +class VolumeTypesManageController(wsgi.Controller): + """ The volume types API controller for the OpenStack API """ + + @wsgi.action("create") + @wsgi.serializers(xml=types.VolumeTypeTemplate) + def _create(self, req, body): + """Creates a new volume type.""" + context = req.environ['cinder.context'] + authorize(context) + + if not body or body == "": + raise webob.exc.HTTPUnprocessableEntity() + + vol_type = body.get('volume_type', None) + if vol_type is None or vol_type == "": + raise webob.exc.HTTPUnprocessableEntity() + + name = vol_type.get('name', None) + specs = vol_type.get('extra_specs', {}) + + if name is None or name == "": + raise webob.exc.HTTPUnprocessableEntity() + + try: + volume_types.create(context, name, specs) + vol_type = volume_types.get_volume_type_by_name(context, name) + except exception.VolumeTypeExists as err: + raise webob.exc.HTTPConflict(explanation=str(err)) + except exception.NotFound: + raise webob.exc.HTTPNotFound() + + return {'volume_type': vol_type} + + @wsgi.action("delete") + def _delete(self, req, id): + """ Deletes an existing volume type """ + context = req.environ['cinder.context'] + authorize(context) + + try: + vol_type = volume_types.get_volume_type(context, id) + volume_types.destroy(context, vol_type['name']) + except exception.NotFound: + raise webob.exc.HTTPNotFound() + + return webob.Response(status_int=202) + + +class Types_manage(extensions.ExtensionDescriptor): + """Types manage support""" + + name = "TypesManage" + alias = "os-types-manage" + namespace = "http://docs.openstack.org/volume/ext/types-manage/api/v1" + updated = "2011-08-24T00:00:00+00:00" + + def get_controller_extensions(self): + controller = VolumeTypesManageController() + extension = extensions.ControllerExtension(self, 'types', controller) + return [extension] diff --git a/cinder/api/openstack/volume/extensions.py b/cinder/api/openstack/volume/extensions.py new file mode 100644 index 00000000000..ffe284555e2 --- /dev/null +++ b/cinder/api/openstack/volume/extensions.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.openstack import extensions as base_extensions +from cinder import flags +from cinder import log as logging + + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +class ExtensionManager(base_extensions.ExtensionManager): + def __init__(self): + LOG.audit(_('Initializing extension manager.')) + + self.cls_list = FLAGS.osapi_volume_extension + self.extensions = {} + self._load_extensions() diff --git a/cinder/api/openstack/volume/snapshots.py b/cinder/api/openstack/volume/snapshots.py new file mode 100644 index 00000000000..f6d5304ec18 --- /dev/null +++ b/cinder/api/openstack/volume/snapshots.py @@ -0,0 +1,170 @@ +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volumes snapshots api.""" + +from webob import exc +import webob + +from cinder.api.openstack import common +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import volume + + +LOG = logging.getLogger(__name__) + + +FLAGS = flags.FLAGS + + +def _translate_snapshot_detail_view(context, vol): + """Maps keys for snapshots details view.""" + + d = _translate_snapshot_summary_view(context, vol) + + # NOTE(gagupta): No additional data / lookups at the moment + return d + + +def _translate_snapshot_summary_view(context, vol): + """Maps keys for snapshots summary view.""" + d = {} + + # TODO(bcwaldon): remove str cast once we use uuids + d['id'] = str(vol['id']) + d['volume_id'] = str(vol['volume_id']) + d['status'] = vol['status'] + # NOTE(gagupta): We map volume_size as the snapshot size + d['size'] = vol['volume_size'] + d['created_at'] = vol['created_at'] + d['display_name'] = vol['display_name'] + d['display_description'] = vol['display_description'] + return d + + +def make_snapshot(elem): + elem.set('id') + elem.set('status') + elem.set('size') + elem.set('created_at') + elem.set('display_name') + elem.set('display_description') + elem.set('volume_id') + + +class SnapshotTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('snapshot', selector='snapshot') + make_snapshot(root) + return xmlutil.MasterTemplate(root, 1) + + +class SnapshotsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('snapshots') + elem = xmlutil.SubTemplateElement(root, 'snapshot', + selector='snapshots') + make_snapshot(elem) + return xmlutil.MasterTemplate(root, 1) + + +class SnapshotsController(object): + """The Volumes API controller for the 
OpenStack API."""
+
+    def __init__(self):
+        self.volume_api = volume.API()
+        super(SnapshotsController, self).__init__()
+
+    @wsgi.serializers(xml=SnapshotTemplate)
+    def show(self, req, id):
+        """Return data about the given snapshot."""
+        context = req.environ['cinder.context']
+
+        try:
+            vol = self.volume_api.get_snapshot(context, id)
+        except exception.NotFound:
+            raise exc.HTTPNotFound()
+
+        return {'snapshot': _translate_snapshot_detail_view(context, vol)}
+
+    def delete(self, req, id):
+        """Delete a snapshot."""
+        context = req.environ['cinder.context']
+
+        LOG.audit(_("Delete snapshot with id: %s"), id, context=context)
+
+        try:
+            snapshot = self.volume_api.get_snapshot(context, id)
+            self.volume_api.delete_snapshot(context, snapshot)
+        except exception.NotFound:
+            raise exc.HTTPNotFound()
+        return webob.Response(status_int=202)
+
+    @wsgi.serializers(xml=SnapshotsTemplate)
+    def index(self, req):
+        """Returns a summary list of snapshots."""
+        return self._items(req, entity_maker=_translate_snapshot_summary_view)
+
+    @wsgi.serializers(xml=SnapshotsTemplate)
+    def detail(self, req):
+        """Returns a detailed list of snapshots."""
+        return self._items(req, entity_maker=_translate_snapshot_detail_view)
+
+    def _items(self, req, entity_maker):
+        """Returns a list of snapshots, transformed through entity_maker."""
+        context = req.environ['cinder.context']
+
+        snapshots = self.volume_api.get_all_snapshots(context)
+        limited_list = common.limited(snapshots, req)
+        res = [entity_maker(context, snapshot) for snapshot in limited_list]
+        return {'snapshots': res}
+
+    @wsgi.serializers(xml=SnapshotTemplate)
+    def create(self, req, body):
+        """Creates a new snapshot."""
+        context = req.environ['cinder.context']
+
+        if not body:
+            # NOTE: raise (not return) so the wsgi layer renders this
+            # as an error response
+            raise exc.HTTPUnprocessableEntity()
+
+        snapshot = body['snapshot']
+        volume_id = snapshot['volume_id']
+        volume = self.volume_api.get(context, volume_id)
+        force = snapshot.get('force', False)
+        msg = _("Create snapshot from volume %s")
+        LOG.audit(msg, volume_id, context=context)
+
+        if force:
+            new_snapshot = self.volume_api.create_snapshot_force(
+                context,
+                volume,
+                snapshot.get('display_name'),
+                snapshot.get('display_description'))
+        else:
+            new_snapshot = self.volume_api.create_snapshot(
+                context,
+                volume,
+                snapshot.get('display_name'),
+                snapshot.get('display_description'))
+
+        retval = _translate_snapshot_detail_view(context, new_snapshot)
+
+        return {'snapshot': retval}
+
+
+def create_resource():
+    return wsgi.Resource(SnapshotsController())
diff --git a/cinder/api/openstack/volume/types.py b/cinder/api/openstack/volume/types.py
new file mode 100644
index 00000000000..8fea061902c
--- /dev/null
+++ b/cinder/api/openstack/volume/types.py
@@ -0,0 +1,76 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +""" The volume type & volume types extra specs extension""" + +from webob import exc + +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import exception +from cinder.volume import volume_types + + +def make_voltype(elem): + elem.set('id') + elem.set('name') + extra_specs = xmlutil.make_flat_dict('extra_specs', selector='extra_specs') + elem.append(extra_specs) + + +class VolumeTypeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_type', selector='volume_type') + make_voltype(root) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume_types') + elem = xmlutil.SubTemplateElement(root, 'volume_type', + selector='volume_types') + make_voltype(elem) + return xmlutil.MasterTemplate(root, 1) + + +class VolumeTypesController(object): + """ The volume types API controller for the OpenStack API """ + + @wsgi.serializers(xml=VolumeTypesTemplate) + def index(self, req): + """ Returns the list of volume types """ + context = req.environ['cinder.context'] + return {'volume_types': volume_types.get_all_types(context).values()} + + @wsgi.serializers(xml=VolumeTypeTemplate) + def show(self, req, id): + """ Return a single volume type item """ + context = req.environ['cinder.context'] + + try: + vol_type = volume_types.get_volume_type(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + # TODO(bcwaldon): remove str cast once we use uuids + vol_type['id'] = str(vol_type['id']) + return {'volume_type': vol_type} + + +def create_resource(): + return wsgi.Resource(VolumeTypesController()) diff --git a/cinder/api/openstack/volume/versions.py b/cinder/api/openstack/volume/versions.py new file mode 100644 index 00000000000..7dcfdbe6c04 --- /dev/null +++ b/cinder/api/openstack/volume/versions.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from cinder.api.openstack.compute import versions +from cinder.api.openstack.volume.views import versions as views_versions +from cinder.api.openstack import wsgi + + +VERSIONS = { + "v1.0": { + "id": "v1.0", + "status": "CURRENT", + "updated": "2012-01-04T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://jorgew.github.com/block-storage-api/" + "content/os-block-storage-1.0.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + #(anthony) FIXME + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.volume+xml;version=1", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.volume+json;version=1", + } + ], + } +} + + +class Versions(versions.Versions): + @wsgi.serializers(xml=versions.VersionsTemplate, + atom=versions.VersionsAtomSerializer) + def index(self, req): + """Return all versions.""" + builder = views_versions.get_view_builder(req) + return builder.build_versions(VERSIONS) + + @wsgi.serializers(xml=versions.ChoicesTemplate) + @wsgi.response(300) + def multi(self, req): + """Return multiple choices.""" + builder = views_versions.get_view_builder(req) + return builder.build_choices(VERSIONS, req) + + +class VolumeVersionV1(object): + @wsgi.serializers(xml=versions.VersionTemplate, + atom=versions.VersionAtomSerializer) + def show(self, req): + builder = views_versions.get_view_builder(req) + return builder.build_version(VERSIONS['v1.0']) + + +def create_resource(): + return wsgi.Resource(VolumeVersionV1()) diff --git a/cinder/api/openstack/volume/views/__init__.py b/cinder/api/openstack/volume/views/__init__.py new file mode 100644 index 00000000000..d65c689a83d --- /dev/null +++ b/cinder/api/openstack/volume/views/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/api/openstack/volume/views/versions.py b/cinder/api/openstack/volume/views/versions.py new file mode 100644 index 00000000000..a4bd164b2d4 --- /dev/null +++ b/cinder/api/openstack/volume/views/versions.py @@ -0,0 +1,36 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
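+# NOTE: illustrative behaviour of the ViewBuilder below: with a request
+# application_url of http://localhost:8776 (a hypothetical endpoint),
+# generate_href('volumes') returns http://localhost:8776/v1/volumes and
+# generate_href() returns http://localhost:8776/v1/.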
+ +import os + +from cinder.api.openstack.compute.views import versions as compute_views + + +def get_view_builder(req): + base_url = req.application_url + return ViewBuilder(base_url) + + +class ViewBuilder(compute_views.ViewBuilder): + def generate_href(self, path=None): + """Create an url that refers to a specific version_number.""" + version_number = 'v1' + if path: + path = path.strip('/') + return os.path.join(self.base_url, version_number, path) + else: + return os.path.join(self.base_url, version_number) + '/' diff --git a/cinder/api/openstack/volume/volumes.py b/cinder/api/openstack/volume/volumes.py new file mode 100644 index 00000000000..9d4b4b5d588 --- /dev/null +++ b/cinder/api/openstack/volume/volumes.py @@ -0,0 +1,263 @@ +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The volumes api.""" + +from webob import exc +import webob + +from cinder.api.openstack import common +from cinder.api.openstack import wsgi +from cinder.api.openstack import xmlutil +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import volume +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + + +FLAGS = flags.FLAGS + + +def _translate_attachment_detail_view(_context, vol): + """Maps keys for attachment details view.""" + + d = _translate_attachment_summary_view(_context, vol) + + # No additional data / lookups at the moment + + return d + + +def _translate_attachment_summary_view(_context, vol): + """Maps keys for attachment summary view.""" + d = {} + + # TODO(bcwaldon): remove str cast once we use uuids + volume_id = str(vol['id']) + + # NOTE(justinsb): We use the volume id as the id of the attachment object + d['id'] = volume_id + + d['volume_id'] = volume_id + if vol.get('instance'): + d['server_id'] = vol['instance']['uuid'] + if vol.get('mountpoint'): + d['device'] = vol['mountpoint'] + + return d + + +def _translate_volume_detail_view(context, vol): + """Maps keys for volumes details view.""" + + d = _translate_volume_summary_view(context, vol) + + # No additional data / lookups at the moment + + return d + + +def _translate_volume_summary_view(context, vol): + """Maps keys for volumes summary view.""" + d = {} + + # TODO(bcwaldon): remove str cast once we use uuids + d['id'] = str(vol['id']) + d['status'] = vol['status'] + d['size'] = vol['size'] + d['availability_zone'] = vol['availability_zone'] + d['created_at'] = vol['created_at'] + + d['attachments'] = [] + if vol['attach_status'] == 'attached': + attachment = _translate_attachment_detail_view(context, vol) + d['attachments'].append(attachment) + + d['display_name'] = vol['display_name'] + d['display_description'] = vol['display_description'] + + if vol['volume_type_id'] and vol.get('volume_type'): + d['volume_type'] = vol['volume_type']['name'] + else: + # TODO(bcwaldon): remove str cast once we use uuids + d['volume_type'] = str(vol['volume_type_id']) + + d['snapshot_id'] = vol['snapshot_id'] + # 
TODO(bcwaldon): remove str cast once we use uuids + if d['snapshot_id'] is not None: + d['snapshot_id'] = str(d['snapshot_id']) + + LOG.audit(_("vol=%s"), vol, context=context) + + if vol.get('volume_metadata'): + meta_dict = {} + for i in vol['volume_metadata']: + meta_dict[i['key']] = i['value'] + d['metadata'] = meta_dict + else: + d['metadata'] = {} + + return d + + +def make_attachment(elem): + elem.set('id') + elem.set('server_id') + elem.set('volume_id') + elem.set('device') + + +def make_volume(elem): + elem.set('id') + elem.set('status') + elem.set('size') + elem.set('availability_zone') + elem.set('created_at') + elem.set('display_name') + elem.set('display_description') + elem.set('volume_type') + elem.set('snapshot_id') + + attachments = xmlutil.SubTemplateElement(elem, 'attachments') + attachment = xmlutil.SubTemplateElement(attachments, 'attachment', + selector='attachments') + make_attachment(attachment) + + metadata = xmlutil.make_flat_dict('metadata') + elem.append(metadata) + + +volume_nsmap = {None: xmlutil.XMLNS_VOLUME_V1, 'atom': xmlutil.XMLNS_ATOM} + + +class VolumeTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volume', selector='volume') + make_volume(root) + return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) + + +class VolumesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('volumes') + elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') + make_volume(elem) + return xmlutil.MasterTemplate(root, 1, nsmap=volume_nsmap) + + +class VolumeController(object): + """The Volumes API controller for the OpenStack API.""" + + def __init__(self): + self.volume_api = volume.API() + super(VolumeController, self).__init__() + + @wsgi.serializers(xml=VolumeTemplate) + def show(self, req, id): + """Return data about the given volume.""" + context = req.environ['cinder.context'] + + try: + vol = self.volume_api.get(context, id) + except exception.NotFound: + raise exc.HTTPNotFound() + + return {'volume': _translate_volume_detail_view(context, vol)} + + def delete(self, req, id): + """Delete a volume.""" + context = req.environ['cinder.context'] + + LOG.audit(_("Delete volume with id: %s"), id, context=context) + + try: + volume = self.volume_api.get(context, id) + self.volume_api.delete(context, volume) + except exception.NotFound: + raise exc.HTTPNotFound() + return webob.Response(status_int=202) + + @wsgi.serializers(xml=VolumesTemplate) + def index(self, req): + """Returns a summary list of volumes.""" + return self._items(req, entity_maker=_translate_volume_summary_view) + + @wsgi.serializers(xml=VolumesTemplate) + def detail(self, req): + """Returns a detailed list of volumes.""" + return self._items(req, entity_maker=_translate_volume_detail_view) + + def _items(self, req, entity_maker): + """Returns a list of volumes, transformed through entity_maker.""" + context = req.environ['cinder.context'] + + volumes = self.volume_api.get_all(context) + limited_list = common.limited(volumes, req) + res = [entity_maker(context, vol) for vol in limited_list] + return {'volumes': res} + + @wsgi.serializers(xml=VolumeTemplate) + def create(self, req, body): + """Creates a new volume.""" + context = req.environ['cinder.context'] + + if not body: + raise exc.HTTPUnprocessableEntity() + + volume = body['volume'] + size = volume['size'] + LOG.audit(_("Create volume of %s GB"), size, context=context) + + kwargs = {} + + req_volume_type = volume.get('volume_type', None) + if 
req_volume_type: + try: + kwargs['volume_type'] = volume_types.get_volume_type_by_name( + context, req_volume_type) + except exception.NotFound: + raise exc.HTTPNotFound() + + kwargs['metadata'] = volume.get('metadata', None) + + snapshot_id = volume.get('snapshot_id') + if snapshot_id is not None: + kwargs['snapshot'] = self.volume_api.get_snapshot(context, + snapshot_id) + else: + kwargs['snapshot'] = None + + kwargs['availability_zone'] = volume.get('availability_zone', None) + + new_volume = self.volume_api.create(context, + size, + volume.get('display_name'), + volume.get('display_description'), + **kwargs) + + # TODO(vish): Instance should be None at db layer instead of + # trying to lazy load, but for now we turn it into + # a dict to avoid an error. + retval = _translate_volume_detail_view(context, dict(new_volume)) + + return {'volume': retval} + + +def create_resource(): + return wsgi.Resource(VolumeController()) diff --git a/cinder/api/openstack/wsgi.py b/cinder/api/openstack/wsgi.py new file mode 100644 index 00000000000..bb309056ead --- /dev/null +++ b/cinder/api/openstack/wsgi.py @@ -0,0 +1,1123 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import inspect +from xml.dom import minidom +from xml.parsers import expat +import math +import time + +from lxml import etree +import webob + +from cinder import exception +from cinder import log as logging +from cinder import utils +from cinder import wsgi + + +XMLNS_V1 = 'http://docs.openstack.org/volume/api/v1' + +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' + +LOG = logging.getLogger(__name__) + +# The vendor content types should serialize identically to the non-vendor +# content types. 
So to avoid littering the code with both options, we +# map the vendor to the other when looking up the type +_CONTENT_TYPE_MAP = { + 'application/vnd.openstack.volume+json': 'application/json', + 'application/vnd.openstack.volume+xml': 'application/xml', +} + +SUPPORTED_CONTENT_TYPES = ( + 'application/json', + 'application/vnd.openstack.volume+json', + 'application/xml', + 'application/vnd.openstack.volume+xml', +) + +_MEDIA_TYPE_MAP = { + 'application/vnd.openstack.volume+json': 'json', + 'application/json': 'json', + 'application/vnd.openstack.volume+xml': 'xml', + 'application/xml': 'xml', + 'application/atom+xml': 'atom', +} + + +class Request(webob.Request): + """Add some OpenStack API-specific logic to the base webob.Request.""" + + def best_match_content_type(self): + """Determine the requested response content-type.""" + if 'cinder.best_content_type' not in self.environ: + # Calculate the best MIME type + content_type = None + + # Check URL path suffix + parts = self.path.rsplit('.', 1) + if len(parts) > 1: + possible_type = 'application/' + parts[1] + if possible_type in SUPPORTED_CONTENT_TYPES: + content_type = possible_type + + if not content_type: + content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES) + + self.environ['cinder.best_content_type'] = (content_type or + 'application/json') + + return self.environ['cinder.best_content_type'] + + def get_content_type(self): + """Determine content type of the request body. + + Does not do any body introspection, only checks header + + """ + if not "Content-Type" in self.headers: + return None + + allowed_types = SUPPORTED_CONTENT_TYPES + content_type = self.content_type + + if content_type not in allowed_types: + raise exception.InvalidContentType(content_type=content_type) + + return content_type + + +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and call local method.""" + action = kwargs.pop('action', 'default') + action_method = getattr(self, str(action), self.default) + return action_method(*args, **kwargs) + + def default(self, data): + raise NotImplementedError() + + +class TextDeserializer(ActionDispatcher): + """Default request body deserialization""" + + def deserialize(self, datastring, action='default'): + return self.dispatch(datastring, action=action) + + def default(self, datastring): + return {} + + +class JSONDeserializer(TextDeserializer): + + def _from_json(self, datastring): + try: + return utils.loads(datastring) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + def default(self, datastring): + return {'body': self._from_json(datastring)} + + +class XMLDeserializer(TextDeserializer): + + def __init__(self, metadata=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + """ + super(XMLDeserializer, self).__init__() + self.metadata = metadata or {} + + def _from_xml(self, datastring): + plurals = set(self.metadata.get('plurals', {})) + + try: + node = minidom.parseString(datastring).childNodes[0] + return {node.nodeName: self._from_xml_node(node, plurals)} + except expat.ExpatError: + msg = _("cannot understand XML") + raise exception.MalformedRequestBody(reason=msg) + + def _from_xml_node(self, node, listnames): + """Convert a minidom node to a simple Python type. + + :param listnames: list of XML node names whose subnodes should + be considered list items. 
+ + """ + if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: + return node.childNodes[0].nodeValue + elif node.nodeName in listnames: + return [self._from_xml_node(n, listnames) for n in node.childNodes] + else: + result = dict() + for attr in node.attributes.keys(): + result[attr] = node.attributes[attr].nodeValue + for child in node.childNodes: + if child.nodeType != node.TEXT_NODE: + result[child.nodeName] = self._from_xml_node(child, + listnames) + return result + + def find_first_child_named(self, parent, name): + """Search a nodes children for the first child with a given name""" + for node in parent.childNodes: + if node.nodeName == name: + return node + return None + + def find_children_named(self, parent, name): + """Return all of a nodes children who have the given name""" + for node in parent.childNodes: + if node.nodeName == name: + yield node + + def extract_text(self, node): + """Get the text field contained by the given node""" + if len(node.childNodes) == 1: + child = node.childNodes[0] + if child.nodeType == child.TEXT_NODE: + return child.nodeValue + return "" + + def find_attribute_or_element(self, parent, name): + """Get an attribute value; fallback to an element if not found""" + if parent.hasAttribute(name): + return parent.getAttribute(name) + + node = self.find_first_child_named(parent, name) + if node: + return self.extract_text(node) + + return None + + def default(self, datastring): + return {'body': self._from_xml(datastring)} + + +class MetadataXMLDeserializer(XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + metadata = {} + if metadata_node is not None: + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata + + +class DictSerializer(ActionDispatcher): + """Default request body serialization""" + + def serialize(self, data, action='default'): + return self.dispatch(data, action=action) + + def default(self, data): + return "" + + +class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization""" + + def default(self, data): + return utils.dumps(data) + + +class XMLDictSerializer(DictSerializer): + + def __init__(self, metadata=None, xmlns=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + :param xmlns: XML namespace to include with serialized xml + """ + super(XMLDictSerializer, self).__init__() + self.metadata = metadata or {} + self.xmlns = xmlns + + def default(self, data): + # We expect data to contain a single key which is the XML root. 
+ root_key = data.keys()[0] + doc = minidom.Document() + node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) + + return self.to_xml_string(node) + + def to_xml_string(self, node, has_atom=False): + self._add_xmlns(node, has_atom) + return node.toxml('UTF-8') + + #NOTE (ameade): the has_atom should be removed after all of the + # xml serializers and view builders have been updated to the current + # spec that required all responses include the xmlns:atom, the has_atom + # flag is to prevent current tests from breaking + def _add_xmlns(self, node, has_atom=False): + if self.xmlns is not None: + node.setAttribute('xmlns', self.xmlns) + if has_atom: + node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom") + + def _to_xml_node(self, doc, metadata, nodename, data): + """Recursive method to convert data members to XML nodes.""" + result = doc.createElement(nodename) + + # Set the xml namespace if one is specified + # TODO(justinsb): We could also use prefixes on the keys + xmlns = metadata.get('xmlns', None) + if xmlns: + result.setAttribute('xmlns', xmlns) + + #TODO(bcwaldon): accomplish this without a type-check + if isinstance(data, list): + collections = metadata.get('list_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for item in data: + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(item)) + result.appendChild(node) + return result + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + #TODO(bcwaldon): accomplish this without a type-check + elif isinstance(data, dict): + collections = metadata.get('dict_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for k, v in data.items(): + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(k)) + text = doc.createTextNode(str(v)) + node.appendChild(text) + result.appendChild(node) + return result + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k, v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: + # Type is atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result + + def _create_link_nodes(self, xml_doc, links): + link_nodes = [] + for link in links: + link_node = xml_doc.createElement('atom:link') + link_node.setAttribute('rel', link['rel']) + link_node.setAttribute('href', link['href']) + if 'type' in link: + link_node.setAttribute('type', link['type']) + link_nodes.append(link_node) + return link_nodes + + def _to_xml(self, root): + """Convert the xml object to an xml string.""" + return etree.tostring(root, encoding='UTF-8', xml_declaration=True) + + +def serializers(**serializers): + """Attaches serializers to a method. + + This decorator associates a dictionary of serializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_serializers'): + func.wsgi_serializers = {} + func.wsgi_serializers.update(serializers) + return func + return decorator + + +def deserializers(**deserializers): + """Attaches deserializers to a method. 
+ + This decorator associates a dictionary of deserializers with a + method. Note that the function attributes are directly + manipulated; the method is not wrapped. + """ + + def decorator(func): + if not hasattr(func, 'wsgi_deserializers'): + func.wsgi_deserializers = {} + func.wsgi_deserializers.update(deserializers) + return func + return decorator + + +def response(code): + """Attaches response code to a method. + + This decorator associates a response code with a method. Note + that the function attributes are directly manipulated; the method + is not wrapped. + """ + + def decorator(func): + func.wsgi_code = code + return func + return decorator + + +class ResponseObject(object): + """Bundles a response object with appropriate serializers. + + Object that app methods may return in order to bind alternate + serializers with a response object to be serialized. Its use is + optional. + """ + + def __init__(self, obj, code=None, **serializers): + """Binds serializers with an object. + + Takes keyword arguments akin to the @serializer() decorator + for specifying serializers. Serializers specified will be + given preference over default serializers or method-specific + serializers on return. + """ + + self.obj = obj + self.serializers = serializers + self._default_code = 200 + self._code = code + self._headers = {} + self.serializer = None + self.media_type = None + + def __getitem__(self, key): + """Retrieves a header with the given name.""" + + return self._headers[key.lower()] + + def __setitem__(self, key, value): + """Sets a header with the given name to the given value.""" + + self._headers[key.lower()] = value + + def __delitem__(self, key): + """Deletes the header with the given name.""" + + del self._headers[key.lower()] + + def _bind_method_serializers(self, meth_serializers): + """Binds method serializers with the response object. + + Binds the method serializers with the response object. + Serializers specified to the constructor will take precedence + over serializers specified to this method. + + :param meth_serializers: A dictionary with keys mapping to + response types and values containing + serializer objects. + """ + + # We can't use update because that would be the wrong + # precedence + for mtype, serializer in meth_serializers.items(): + self.serializers.setdefault(mtype, serializer) + + def get_serializer(self, content_type, default_serializers=None): + """Returns the serializer for the wrapped object. + + Returns the serializer for the wrapped object subject to the + indicated content type. If no serializer matching the content + type is attached, an appropriate serializer drawn from the + default serializers will be used. If no appropriate + serializer is available, raises InvalidContentType. + """ + + default_serializers = default_serializers or {} + + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in self.serializers: + return mtype, self.serializers[mtype] + else: + return mtype, default_serializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + def preserialize(self, content_type, default_serializers=None): + """Prepares the serializer that will be used to serialize. + + Determines the serializer that will be used and prepares an + instance of it for later call. This allows the serializer to + be accessed by extensions for, e.g., template extension. 
+ """ + + mtype, serializer = self.get_serializer(content_type, + default_serializers) + self.media_type = mtype + self.serializer = serializer() + + def attach(self, **kwargs): + """Attach slave templates to serializers.""" + + if self.media_type in kwargs: + self.serializer.attach(kwargs[self.media_type]) + + def serialize(self, request, content_type, default_serializers=None): + """Serializes the wrapped object. + + Utility method for serializing the wrapped object. Returns a + webob.Response object. + """ + + if self.serializer: + serializer = self.serializer + else: + _mtype, _serializer = self.get_serializer(content_type, + default_serializers) + serializer = _serializer() + + response = webob.Response() + response.status_int = self.code + for hdr, value in self._headers.items(): + response.headers[hdr] = value + response.headers['Content-Type'] = content_type + if self.obj is not None: + response.body = serializer.serialize(self.obj) + + return response + + @property + def code(self): + """Retrieve the response status.""" + + return self._code or self._default_code + + @property + def headers(self): + """Retrieve the headers.""" + + return self._headers.copy() + + +def action_peek_json(body): + """Determine action to invoke.""" + + try: + decoded = utils.loads(body) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + + # Make sure there's exactly one key... + if len(decoded) != 1: + msg = _("too many body keys") + raise exception.MalformedRequestBody(reason=msg) + + # Return the action and the decoded body... + return decoded.keys()[0] + + +def action_peek_xml(body): + """Determine action to invoke.""" + + dom = minidom.parseString(body) + action_node = dom.childNodes[0] + + return action_node.tagName + + +class ResourceExceptionHandler(object): + """Context manager to handle Resource exceptions. + + Used when processing exceptions generated by API implementation + methods (or their extensions). Converts most exceptions to Fault + exceptions, with the appropriate logging. + """ + + def __enter__(self): + return None + + def __exit__(self, ex_type, ex_value, ex_traceback): + if not ex_value: + return True + + if isinstance(ex_value, exception.NotAuthorized): + msg = unicode(ex_value) + raise Fault(webob.exc.HTTPForbidden(explanation=msg)) + elif isinstance(ex_value, exception.Invalid): + raise Fault(exception.ConvertedException( + code=ex_value.code, explanation=unicode(ex_value))) + elif isinstance(ex_value, TypeError): + exc_info = (ex_type, ex_value, ex_traceback) + LOG.error(_('Exception handling resource: %s') % ex_value, + exc_info=exc_info) + raise Fault(webob.exc.HTTPBadRequest()) + elif isinstance(ex_value, Fault): + LOG.info(_("Fault thrown: %s"), unicode(ex_value)) + raise ex_value + elif isinstance(ex_value, webob.exc.HTTPException): + LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value)) + raise Fault(ex_value) + + # We didn't handle the exception + return False + + +class Resource(wsgi.Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). 
+ They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. + + Exceptions derived from webob.exc.HTTPException will be automatically + wrapped in Fault() to provide API friendly error responses. + + """ + + def __init__(self, controller, action_peek=None, **deserializers): + """ + :param controller: object that implement methods created by routes lib + :param action_peek: dictionary of routines for peeking into an action + request body to determine the desired action + """ + + self.controller = controller + + default_deserializers = dict(xml=XMLDeserializer, + json=JSONDeserializer) + default_deserializers.update(deserializers) + + self.default_deserializers = default_deserializers + self.default_serializers = dict(xml=XMLDictSerializer, + json=JSONDictSerializer) + + self.action_peek = dict(xml=action_peek_xml, + json=action_peek_json) + self.action_peek.update(action_peek or {}) + + # Copy over the actions dictionary + self.wsgi_actions = {} + if controller: + self.register_actions(controller) + + # Save a mapping of extensions + self.wsgi_extensions = {} + self.wsgi_action_extensions = {} + + def register_actions(self, controller): + """Registers controller actions with this resource.""" + + actions = getattr(controller, 'wsgi_actions', {}) + for key, method_name in actions.items(): + self.wsgi_actions[key] = getattr(controller, method_name) + + def register_extensions(self, controller): + """Registers controller extensions with this resource.""" + + extensions = getattr(controller, 'wsgi_extensions', []) + for method_name, action_name in extensions: + # Look up the extending method + extension = getattr(controller, method_name) + + if action_name: + # Extending an action... + if action_name not in self.wsgi_action_extensions: + self.wsgi_action_extensions[action_name] = [] + self.wsgi_action_extensions[action_name].append(extension) + else: + # Extending a regular method + if method_name not in self.wsgi_extensions: + self.wsgi_extensions[method_name] = [] + self.wsgi_extensions[method_name].append(extension) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + + # NOTE(Vek): Check for get_action_args() override in the + # controller + if hasattr(self.controller, 'get_action_args'): + return self.controller.get_action_args(request_environment) + + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + except (KeyError, IndexError, AttributeError): + return {} + + try: + del args['controller'] + except KeyError: + pass + + try: + del args['format'] + except KeyError: + pass + + return args + + def get_body(self, request): + try: + content_type = request.get_content_type() + except exception.InvalidContentType: + LOG.debug(_("Unrecognized Content-Type provided in request")) + return None, '' + + if not content_type: + LOG.debug(_("No Content-Type provided in request")) + return None, '' + + if len(request.body) <= 0: + LOG.debug(_("Empty body provided in request")) + return None, '' + + return content_type, request.body + + def deserialize(self, meth, content_type, body): + meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) + try: + mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) + if mtype in meth_deserializers: + deserializer = meth_deserializers[mtype] + else: + deserializer = self.default_deserializers[mtype] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + return deserializer().deserialize(body) + 
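+    # NOTE: an illustrative sketch of the extension protocol handled by
+    # the two methods below. An extension may be a plain callable
+    # (post-processing only) or a generator function:
+    #
+    #     def extension(req, **action_args):
+    #         # runs before the action
+    #         resp_obj = yield
+    #         # runs after the action, with the ResponseObject
+    #
+    # pre_process_extensions() drives the code before the yield and
+    # post_process_extensions() sends the response object back in.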
+ def pre_process_extensions(self, extensions, request, action_args): + # List of callables for post-processing extensions + post = [] + + for ext in extensions: + if inspect.isgeneratorfunction(ext): + response = None + + # If it's a generator function, the part before the + # yield is the preprocessing stage + try: + with ResourceExceptionHandler(): + gen = ext(req=request, **action_args) + response = gen.next() + except Fault as ex: + response = ex + + # We had a response... + if response: + return response, [] + + # No response, queue up generator for post-processing + post.append(gen) + else: + # Regular functions only perform post-processing + post.append(ext) + + # Run post-processing in the reverse order + return None, reversed(post) + + def post_process_extensions(self, extensions, resp_obj, request, + action_args): + for ext in extensions: + response = None + if inspect.isgenerator(ext): + # If it's a generator, run the second half of + # processing + try: + with ResourceExceptionHandler(): + response = ext.send(resp_obj) + except StopIteration: + # Normal exit of generator + continue + except Fault as ex: + response = ex + else: + # Regular functions get post-processing... + try: + with ResourceExceptionHandler(): + response = ext(req=request, resp_obj=resp_obj, + **action_args) + except Fault as ex: + response = ex + + # We had a response... + if response: + return response + + return None + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" + + LOG.info("%(method)s %(url)s" % {"method": request.method, + "url": request.url}) + + # Identify the action, its arguments, and the requested + # content type + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + content_type, body = self.get_body(request) + accept = request.best_match_content_type() + + # NOTE(Vek): Splitting the function up this way allows for + # auditing by external tools that wrap the existing + # function. If we try to audit __call__(), we can + # run into troubles due to the @webob.dec.wsgify() + # decorator. + return self._process_stack(request, action, action_args, + content_type, body, accept) + + def _process_stack(self, request, action, action_args, + content_type, body, accept): + """Implement the processing stack.""" + + # Get the implementing method + try: + meth, extensions = self.get_method(request, action, + content_type, body) + except (AttributeError, TypeError): + return Fault(webob.exc.HTTPNotFound()) + except KeyError as ex: + msg = _("There is no such action: %s") % ex.args[0] + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return Fault(webob.exc.HTTPBadRequest(explanation=msg)) + + # Now, deserialize the request body... 
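+        # (Illustrative: a JSON body of '{"volume": {"size": 1}}'
+        # deserializes to {'body': {'volume': {'size': 1}}}, which is
+        # merged into action_args below.)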
+        try:
+            if content_type:
+                contents = self.deserialize(meth, content_type, body)
+            else:
+                contents = {}
+        except exception.InvalidContentType:
+            msg = _("Unsupported Content-Type")
+            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
+        except exception.MalformedRequestBody:
+            msg = _("Malformed request body")
+            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
+
+        # Update the action args
+        action_args.update(contents)
+
+        project_id = action_args.pop("project_id", None)
+        context = request.environ.get('cinder.context')
+        if (context and project_id and (project_id != context.project_id)):
+            msg = _("Malformed request url")
+            return Fault(webob.exc.HTTPBadRequest(explanation=msg))
+
+        # Run pre-processing extensions
+        response, post = self.pre_process_extensions(extensions,
+                                                     request, action_args)
+
+        if not response:
+            try:
+                with ResourceExceptionHandler():
+                    action_result = self.dispatch(meth, request, action_args)
+            except Fault as ex:
+                response = ex
+
+        if not response:
+            # No exceptions; convert action_result into a
+            # ResponseObject
+            resp_obj = None
+            if type(action_result) is dict or action_result is None:
+                resp_obj = ResponseObject(action_result)
+            elif isinstance(action_result, ResponseObject):
+                resp_obj = action_result
+            else:
+                response = action_result
+
+            # Run post-processing extensions
+            if resp_obj:
+                _set_request_id_header(request, resp_obj)
+                # Do a preserialize to set up the response object
+                serializers = getattr(meth, 'wsgi_serializers', {})
+                resp_obj._bind_method_serializers(serializers)
+                if hasattr(meth, 'wsgi_code'):
+                    resp_obj._default_code = meth.wsgi_code
+                resp_obj.preserialize(accept, self.default_serializers)
+
+                # Process post-processing extensions
+                response = self.post_process_extensions(post, resp_obj,
+                                                        request, action_args)
+
+            if resp_obj and not response:
+                response = resp_obj.serialize(request, accept,
+                                              self.default_serializers)
+
+        try:
+            msg_dict = dict(url=request.url, status=response.status_int)
+            msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
+        except AttributeError as e:
+            msg_dict = dict(url=request.url, e=e)
+            msg = _("%(url)s returned a fault: %(e)s") % msg_dict
+
+        LOG.info(msg)
+
+        return response
+
+    def get_method(self, request, action, content_type, body):
+        """Look up the action-specific method and its extensions."""
+
+        # Look up the method
+        try:
+            if not self.controller:
+                meth = getattr(self, action)
+            else:
+                meth = getattr(self.controller, action)
+        except AttributeError:
+            if (not self.wsgi_actions or
+                    action not in ['action', 'create', 'delete']):
+                # Propagate the error
+                raise
+        else:
+            return meth, self.wsgi_extensions.get(action, [])
+
+        if action == 'action':
+            # OK, it's an action; figure out which action...
+            mtype = _MEDIA_TYPE_MAP.get(content_type)
+            action_name = self.action_peek[mtype](body)
+        else:
+            action_name = action
+
+        # Look up the action method
+        return (self.wsgi_actions[action_name],
+                self.wsgi_action_extensions.get(action_name, []))
+
+    def dispatch(self, method, request, action_args):
+        """Dispatch a call to the action-specific method."""
+
+        return method(req=request, **action_args)
+
+
+def action(name):
+    """Mark a function as an action.
+
+    The given name will be taken as the action key in the body.
+
+    This is also overloaded to allow extensions to provide
+    non-extending definitions of create and delete operations.
+ """ + + def decorator(func): + func.wsgi_action = name + return func + return decorator + + +def extends(*args, **kwargs): + """Indicate a function extends an operation. + + Can be used as either:: + + @extends + def index(...): + pass + + or as:: + + @extends(action='resize') + def _action_resize(...): + pass + """ + + def decorator(func): + # Store enough information to find what we're extending + func.wsgi_extends = (func.__name__, kwargs.get('action')) + return func + + # If we have positional arguments, call the decorator + if args: + return decorator(*args) + + # OK, return the decorator instead + return decorator + + +class ControllerMetaclass(type): + """Controller metaclass. + + This metaclass automates the task of assembling a dictionary + mapping action keys to method names. + """ + + def __new__(mcs, name, bases, cls_dict): + """Adds the wsgi_actions dictionary to the class.""" + + # Find all actions + actions = {} + extensions = [] + for key, value in cls_dict.items(): + if not callable(value): + continue + if getattr(value, 'wsgi_action', None): + actions[value.wsgi_action] = key + elif getattr(value, 'wsgi_extends', None): + extensions.append(value.wsgi_extends) + + # Add the actions and extensions to the class dict + cls_dict['wsgi_actions'] = actions + cls_dict['wsgi_extensions'] = extensions + + return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, + cls_dict) + + +class Controller(object): + """Default controller.""" + + __metaclass__ = ControllerMetaclass + + _view_builder_class = None + + def __init__(self, view_builder=None): + """Initialize controller with a view builder instance.""" + if view_builder: + self._view_builder = view_builder + elif self._view_builder_class: + self._view_builder = self._view_builder_class() + else: + self._view_builder = None + + +class Fault(webob.exc.HTTPException): + """Wrap webob.exc.HTTPException to provide API friendly response.""" + + _fault_names = { + 400: "badRequest", + 401: "unauthorized", + 403: "forbidden", + 404: "itemNotFound", + 405: "badMethod", + 409: "conflictingRequest", + 413: "overLimit", + 415: "badMediaType", + 501: "notImplemented", + 503: "serviceUnavailable"} + + def __init__(self, exception): + """Create a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + self.status_int = exception.status_int + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Generate a WSGI response based on the exception passed to ctor.""" + # Replace the body with fault details. + code = self.wrapped_exc.status_int + fault_name = self._fault_names.get(code, "computeFault") + fault_data = { + fault_name: { + 'code': code, + 'message': self.wrapped_exc.explanation}} + if code == 413: + retry = self.wrapped_exc.headers['Retry-After'] + fault_data[fault_name]['retryAfter'] = retry + + # 'code' is an attribute on the fault tag itself + metadata = {'attributes': {fault_name: 'code'}} + + xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) + + content_type = req.best_match_content_type() + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) + self.wrapped_exc.content_type = content_type + _set_request_id_header(req, self.wrapped_exc.headers) + + return self.wrapped_exc + + def __str__(self): + return self.wrapped_exc.__str__() + + +class OverLimitFault(webob.exc.HTTPException): + """ + Rate-limited request response. 
+ """ + + def __init__(self, message, details, retry_time): + """ + Initialize new `OverLimitFault` with relevant information. + """ + hdrs = OverLimitFault._retry_after(retry_time) + self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) + self.content = { + "overLimitFault": { + "code": self.wrapped_exc.status_int, + "message": message, + "details": details, + }, + } + + @staticmethod + def _retry_after(retry_time): + delay = int(math.ceil(retry_time - time.time())) + retry_after = delay if delay > 0 else 0 + headers = {'Retry-After': '%d' % retry_after} + return headers + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """ + Return the wrapped exception with a serialized body conforming to our + error format. + """ + content_type = request.best_match_content_type() + metadata = {"attributes": {"overLimitFault": "code"}} + + xml_serializer = XMLDictSerializer(metadata, XMLNS_V1) + serializer = { + 'application/xml': xml_serializer, + 'application/json': JSONDictSerializer(), + }[content_type] + + content = serializer.serialize(self.content) + self.wrapped_exc.body = content + + return self.wrapped_exc + + +def _set_request_id_header(req, headers): + context = req.environ.get('cinder.context') + if context: + headers['x-compute-request-id'] = context.request_id diff --git a/cinder/api/openstack/xmlutil.py b/cinder/api/openstack/xmlutil.py new file mode 100644 index 00000000000..5dfe0c1220b --- /dev/null +++ b/cinder/api/openstack/xmlutil.py @@ -0,0 +1,908 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os.path + +from lxml import etree + +from cinder import utils + + +XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' +XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' +XMLNS_COMMON_V10 = 'http://docs.openstack.org/common/api/v1.0' +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' +XMLNS_VOLUME_V1 = 'http://docs.openstack.org/volume/api/v1' + + +def validate_schema(xml, schema_name): + if isinstance(xml, str): + xml = etree.fromstring(xml) + base_path = 'cinder/api/openstack/compute/schemas/v1.1/' + if schema_name in ('atom', 'atom-link'): + base_path = 'cinder/api/openstack/compute/schemas/' + schema_path = os.path.join(utils.cinderdir(), + '%s%s.rng' % (base_path, schema_name)) + schema_doc = etree.parse(schema_path) + relaxng = etree.RelaxNG(schema_doc) + relaxng.assertValid(xml) + + +class Selector(object): + """Selects datum to operate on from an object.""" + + def __init__(self, *chain): + """Initialize the selector. + + Each argument is a subsequent index into the object. + """ + + self.chain = chain + + def __repr__(self): + """Return a representation of the selector.""" + + return "Selector" + repr(self.chain) + + def __call__(self, obj, do_raise=False): + """Select a datum to operate on. + + Selects the relevant datum within the object. + + :param obj: The object from which to select the object. 
+ :param do_raise: If False (the default), return None if the + indexed datum does not exist. Otherwise, + raise a KeyError. + """ + + # Walk the selector list + for elem in self.chain: + # If it's callable, call it + if callable(elem): + obj = elem(obj) + else: + # Use indexing + try: + obj = obj[elem] + except (KeyError, IndexError): + # No sense going any further + if do_raise: + # Convert to a KeyError, for consistency + raise KeyError(elem) + return None + + # Return the finally-selected object + return obj + + +def get_items(obj): + """Get items in obj.""" + + return list(obj.items()) + + +class EmptyStringSelector(Selector): + """Returns the empty string if Selector would return None.""" + def __call__(self, obj, do_raise=False): + """Returns empty string if the selected value does not exist.""" + + try: + return super(EmptyStringSelector, self).__call__(obj, True) + except KeyError: + return "" + + +class ConstantSelector(object): + """Returns a constant.""" + + def __init__(self, value): + """Initialize the selector. + + :param value: The value to return. + """ + + self.value = value + + def __repr__(self): + """Return a representation of the selector.""" + + return repr(self.value) + + def __call__(self, _obj, _do_raise=False): + """Select a datum to operate on. + + Returns a constant value. Compatible with + Selector.__call__(). + """ + + return self.value + + +class TemplateElement(object): + """Represent an element in the template.""" + + def __init__(self, tag, attrib=None, selector=None, subselector=None, + **extra): + """Initialize an element. + + Initializes an element in the template. Keyword arguments + specify attributes to be set on the element; values must be + callables. See TemplateElement.set() for more information. + + :param tag: The name of the tag to create. + :param attrib: An optional dictionary of element attributes. + :param selector: An optional callable taking an object and + optional boolean do_raise indicator and + returning the object bound to the element. + :param subselector: An optional callable taking an object and + optional boolean do_raise indicator and + returning the object bound to the element. + This is used to further refine the datum + object returned by selector in the event + that it is a list of objects. 
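+
+        An illustrative use::
+
+            elem = TemplateElement('volume', selector='volume')
+            elem.set('id')
+
+        binds the element to obj['volume'] and renders its 'id' key as
+        an XML attribute.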
+ """ + + # Convert selector into a Selector + if selector is None: + selector = Selector() + elif not callable(selector): + selector = Selector(selector) + + # Convert subselector into a Selector + if subselector is not None and not callable(subselector): + subselector = Selector(subselector) + + self.tag = tag + self.selector = selector + self.subselector = subselector + self.attrib = {} + self._text = None + self._children = [] + self._childmap = {} + + # Run the incoming attributes through set() so that they + # become selectorized + if not attrib: + attrib = {} + attrib.update(extra) + for k, v in attrib.items(): + self.set(k, v) + + def __repr__(self): + """Return a representation of the template element.""" + + return ('<%s.%s %r at %#x>' % + (self.__class__.__module__, self.__class__.__name__, + self.tag, id(self))) + + def __len__(self): + """Return the number of child elements.""" + + return len(self._children) + + def __contains__(self, key): + """Determine whether a child node named by key exists.""" + + return key in self._childmap + + def __getitem__(self, idx): + """Retrieve a child node by index or name.""" + + if isinstance(idx, basestring): + # Allow access by node name + return self._childmap[idx] + else: + return self._children[idx] + + def append(self, elem): + """Append a child to the element.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap: + raise KeyError(elem.tag) + + self._children.append(elem) + self._childmap[elem.tag] = elem + + def extend(self, elems): + """Append children to the element.""" + + # Pre-evaluate the elements + elemmap = {} + elemlist = [] + for elem in elems: + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap or elem.tag in elemmap: + raise KeyError(elem.tag) + + elemmap[elem.tag] = elem + elemlist.append(elem) + + # Update the children + self._children.extend(elemlist) + self._childmap.update(elemmap) + + def insert(self, idx, elem): + """Insert a child element at the given index.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Avoid duplications + if elem.tag in self._childmap: + raise KeyError(elem.tag) + + self._children.insert(idx, elem) + self._childmap[elem.tag] = elem + + def remove(self, elem): + """Remove a child element.""" + + # Unwrap templates... + elem = elem.unwrap() + + # Check if element exists + if elem.tag not in self._childmap or self._childmap[elem.tag] != elem: + raise ValueError(_('element is not a child')) + + self._children.remove(elem) + del self._childmap[elem.tag] + + def get(self, key): + """Get an attribute. + + Returns a callable which performs datum selection. + + :param key: The name of the attribute to get. + """ + + return self.attrib[key] + + def set(self, key, value=None): + """Set an attribute. + + :param key: The name of the attribute to set. + + :param value: A callable taking an object and optional boolean + do_raise indicator and returning the datum bound + to the attribute. If None, a Selector() will be + constructed from the key. If a string, a + Selector() will be constructed from the string. 
+ """ + + # Convert value to a selector + if value is None: + value = Selector(key) + elif not callable(value): + value = Selector(value) + + self.attrib[key] = value + + def keys(self): + """Return the attribute names.""" + + return self.attrib.keys() + + def items(self): + """Return the attribute names and values.""" + + return self.attrib.items() + + def unwrap(self): + """Unwraps a template to return a template element.""" + + # We are a template element + return self + + def wrap(self): + """Wraps a template element to return a template.""" + + # Wrap in a basic Template + return Template(self) + + def apply(self, elem, obj): + """Apply text and attributes to an etree.Element. + + Applies the text and attribute instructions in the template + element to an etree.Element instance. + + :param elem: An etree.Element instance. + :param obj: The base object associated with this template + element. + """ + + # Start with the text... + if self.text is not None: + elem.text = unicode(self.text(obj)) + + # Now set up all the attributes... + for key, value in self.attrib.items(): + try: + elem.set(key, unicode(value(obj, True))) + except KeyError: + # Attribute has no value, so don't include it + pass + + def _render(self, parent, datum, patches, nsmap): + """Internal rendering. + + Renders the template node into an etree.Element object. + Returns the etree.Element object. + + :param parent: The parent etree.Element instance. + :param datum: The datum associated with this template element. + :param patches: A list of other template elements that must + also be applied. + :param nsmap: An optional namespace dictionary to be + associated with the etree.Element instance. + """ + + # Allocate a node + if callable(self.tag): + tagname = self.tag(datum) + else: + tagname = self.tag + elem = etree.Element(tagname, nsmap=nsmap) + + # If we have a parent, append the node to the parent + if parent is not None: + parent.append(elem) + + # If the datum is None, do nothing else + if datum is None: + return elem + + # Apply this template element to the element + self.apply(elem, datum) + + # Additionally, apply the patches + for patch in patches: + patch.apply(elem, datum) + + # We have fully rendered the element; return it + return elem + + def render(self, parent, obj, patches=[], nsmap=None): + """Render an object. + + Renders an object against this template node. Returns a list + of two-item tuples, where the first item is an etree.Element + instance and the second item is the datum associated with that + instance. + + :param parent: The parent for the etree.Element instances. + :param obj: The object to render this template element + against. + :param patches: A list of other template elements to apply + when rendering this template element. + :param nsmap: An optional namespace dictionary to attach to + the etree.Element instances. 
+ """ + + # First, get the datum we're rendering + data = None if obj is None else self.selector(obj) + + # Check if we should render at all + if not self.will_render(data): + return [] + elif data is None: + return [(self._render(parent, None, patches, nsmap), None)] + + # Make the data into a list if it isn't already + if not isinstance(data, list): + data = [data] + elif parent is None: + raise ValueError(_('root element selecting a list')) + + # Render all the elements + elems = [] + for datum in data: + if self.subselector is not None: + datum = self.subselector(datum) + elems.append((self._render(parent, datum, patches, nsmap), datum)) + + # Return all the elements rendered, as well as the + # corresponding datum for the next step down the tree + return elems + + def will_render(self, datum): + """Hook method. + + An overridable hook method to determine whether this template + element will be rendered at all. By default, returns False + (inhibiting rendering) if the datum is None. + + :param datum: The datum associated with this template element. + """ + + # Don't render if datum is None + return datum is not None + + def _text_get(self): + """Template element text. + + Either None or a callable taking an object and optional + boolean do_raise indicator and returning the datum bound to + the text of the template element. + """ + + return self._text + + def _text_set(self, value): + # Convert value to a selector + if value is not None and not callable(value): + value = Selector(value) + + self._text = value + + def _text_del(self): + self._text = None + + text = property(_text_get, _text_set, _text_del) + + def tree(self): + """Return string representation of the template tree. + + Returns a representation of the template rooted at this + element as a string, suitable for inclusion in debug logs. + """ + + # Build the inner contents of the tag... + contents = [self.tag, '!selector=%r' % self.selector] + + # Add the text... + if self.text is not None: + contents.append('!text=%r' % self.text) + + # Add all the other attributes + for key, value in self.attrib.items(): + contents.append('%s=%r' % (key, value)) + + # If there are no children, return it as a closed tag + if len(self) == 0: + return '<%s/>' % ' '.join([str(i) for i in contents]) + + # OK, recurse to our children + children = [c.tree() for c in self] + + # Return the result + return ('<%s>%s' % + (' '.join(contents), ''.join(children), self.tag)) + + +def SubTemplateElement(parent, tag, attrib=None, selector=None, + subselector=None, **extra): + """Create a template element as a child of another. + + Corresponds to the etree.SubElement interface. Parameters are as + for TemplateElement, with the addition of the parent. + """ + + # Convert attributes + attrib = attrib or {} + attrib.update(extra) + + # Get a TemplateElement + elem = TemplateElement(tag, attrib=attrib, selector=selector, + subselector=subselector) + + # Append the parent safely + if parent is not None: + parent.append(elem) + + return elem + + +class Template(object): + """Represent a template.""" + + def __init__(self, root, nsmap=None): + """Initialize a template. + + :param root: The root element of the template. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. 
+ """ + + self.root = root.unwrap() if root is not None else None + self.nsmap = nsmap or {} + self.serialize_options = dict(encoding='UTF-8', xml_declaration=True) + + def _serialize(self, parent, obj, siblings, nsmap=None): + """Internal serialization. + + Recursive routine to build a tree of etree.Element instances + from an object based on the template. Returns the first + etree.Element instance rendered, or None. + + :param parent: The parent etree.Element instance. Can be + None. + :param obj: The object to render. + :param siblings: The TemplateElement instances against which + to render the object. + :param nsmap: An optional namespace dictionary to be + associated with the etree.Element instance + rendered. + """ + + # First step, render the element + elems = siblings[0].render(parent, obj, siblings[1:], nsmap) + + # Now, recurse to all child elements + seen = set() + for idx, sibling in enumerate(siblings): + for child in sibling: + # Have we handled this child already? + if child.tag in seen: + continue + seen.add(child.tag) + + # Determine the child's siblings + nieces = [child] + for sib in siblings[idx + 1:]: + if child.tag in sib: + nieces.append(sib[child.tag]) + + # Now we recurse for every data element + for elem, datum in elems: + self._serialize(elem, datum, nieces) + + # Return the first element; at the top level, this will be the + # root element + if elems: + return elems[0][0] + + def serialize(self, obj, *args, **kwargs): + """Serialize an object. + + Serializes an object against the template. Returns a string + with the serialized XML. Positional and keyword arguments are + passed to etree.tostring(). + + :param obj: The object to serialize. + """ + + elem = self.make_tree(obj) + if elem is None: + return '' + + for k, v in self.serialize_options.items(): + kwargs.setdefault(k, v) + + # Serialize it into XML + return etree.tostring(elem, *args, **kwargs) + + def make_tree(self, obj): + """Create a tree. + + Serializes an object against the template. Returns an Element + node with appropriate children. + + :param obj: The object to serialize. + """ + + # If the template is empty, return the empty string + if self.root is None: + return None + + # Get the siblings and nsmap of the root element + siblings = self._siblings() + nsmap = self._nsmap() + + # Form the element tree + return self._serialize(None, obj, siblings, nsmap) + + def _siblings(self): + """Hook method for computing root siblings. + + An overridable hook method to return the siblings of the root + element. By default, this is the root element itself. + """ + + return [self.root] + + def _nsmap(self): + """Hook method for computing the namespace dictionary. + + An overridable hook method to return the namespace dictionary. + """ + + return self.nsmap.copy() + + def unwrap(self): + """Unwraps a template to return a template element.""" + + # Return the root element + return self.root + + def wrap(self): + """Wraps a template element to return a template.""" + + # We are a template + return self + + def apply(self, master): + """Hook method for determining slave applicability. + + An overridable hook method used to determine if this template + is applicable as a slave to a given master template. + + :param master: The master template to test. + """ + + return True + + def tree(self): + """Return string representation of the template tree. + + Returns a representation of the template as a string, suitable + for inclusion in debug logs. 
+ """ + + return "%r: %s" % (self, self.root.tree()) + + +class MasterTemplate(Template): + """Represent a master template. + + Master templates are versioned derivatives of templates that + additionally allow slave templates to be attached. Slave + templates allow modification of the serialized result without + directly changing the master. + """ + + def __init__(self, root, version, nsmap=None): + """Initialize a master template. + + :param root: The root element of the template. + :param version: The version number of the template. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. + """ + + super(MasterTemplate, self).__init__(root, nsmap) + self.version = version + self.slaves = [] + + def __repr__(self): + """Return string representation of the template.""" + + return ("<%s.%s object version %s at %#x>" % + (self.__class__.__module__, self.__class__.__name__, + self.version, id(self))) + + def _siblings(self): + """Hook method for computing root siblings. + + An overridable hook method to return the siblings of the root + element. This is the root element plus the root elements of + all the slave templates. + """ + + return [self.root] + [slave.root for slave in self.slaves] + + def _nsmap(self): + """Hook method for computing the namespace dictionary. + + An overridable hook method to return the namespace dictionary. + The namespace dictionary is computed by taking the master + template's namespace dictionary and updating it from all the + slave templates. + """ + + nsmap = self.nsmap.copy() + for slave in self.slaves: + nsmap.update(slave._nsmap()) + return nsmap + + def attach(self, *slaves): + """Attach one or more slave templates. + + Attaches one or more slave templates to the master template. + Slave templates must have a root element with the same tag as + the master template. The slave template's apply() method will + be called to determine if the slave should be applied to this + master; if it returns False, that slave will be skipped. + (This allows filtering of slaves based on the version of the + master template.) + """ + + slave_list = [] + for slave in slaves: + slave = slave.wrap() + + # Make sure we have a tree match + if slave.root.tag != self.root.tag: + slavetag = slave.root.tag + mastertag = self.root.tag + msg = _("Template tree mismatch; adding slave %(slavetag)s " + "to master %(mastertag)s") % locals() + raise ValueError(msg) + + # Make sure slave applies to this template + if not slave.apply(self): + continue + + slave_list.append(slave) + + # Add the slaves + self.slaves.extend(slave_list) + + def copy(self): + """Return a copy of this master template.""" + + # Return a copy of the MasterTemplate + tmp = self.__class__(self.root, self.version, self.nsmap) + tmp.slaves = self.slaves[:] + return tmp + + +class SlaveTemplate(Template): + """Represent a slave template. + + Slave templates are versioned derivatives of templates. Each + slave has a minimum version and optional maximum version of the + master template to which they can be attached. + """ + + def __init__(self, root, min_vers, max_vers=None, nsmap=None): + """Initialize a slave template. + + :param root: The root element of the template. + :param min_vers: The minimum permissible version of the master + template for this slave template to apply. + :param max_vers: An optional upper bound for the master + template version. + :param nsmap: An optional namespace dictionary to be + associated with the root element of the + template. 
+ """ + + super(SlaveTemplate, self).__init__(root, nsmap) + self.min_vers = min_vers + self.max_vers = max_vers + + def __repr__(self): + """Return string representation of the template.""" + + return ("<%s.%s object versions %s-%s at %#x>" % + (self.__class__.__module__, self.__class__.__name__, + self.min_vers, self.max_vers, id(self))) + + def apply(self, master): + """Hook method for determining slave applicability. + + An overridable hook method used to determine if this template + is applicable as a slave to a given master template. This + version requires the master template to have a version number + between min_vers and max_vers. + + :param master: The master template to test. + """ + + # Does the master meet our minimum version requirement? + if master.version < self.min_vers: + return False + + # How about our maximum version requirement? + if self.max_vers is not None and master.version > self.max_vers: + return False + + return True + + +class TemplateBuilder(object): + """Template builder. + + This class exists to allow templates to be lazily built without + having to build them each time they are needed. It must be + subclassed, and the subclass must implement the construct() + method, which must return a Template (or subclass) instance. The + constructor will always return the template returned by + construct(), or, if it has a copy() method, a copy of that + template. + """ + + _tmpl = None + + def __new__(cls, copy=True): + """Construct and return a template. + + :param copy: If True (the default), a copy of the template + will be constructed and returned, if possible. + """ + + # Do we need to construct the template? + if cls._tmpl is None: + tmp = super(TemplateBuilder, cls).__new__(cls) + + # Construct the template + cls._tmpl = tmp.construct() + + # If the template has a copy attribute, return the result of + # calling it + if copy and hasattr(cls._tmpl, 'copy'): + return cls._tmpl.copy() + + # Return the template + return cls._tmpl + + def construct(self): + """Construct a template. + + Called to construct a template instance, which it must return. + Only called once. + """ + + raise NotImplementedError(_("subclasses must implement construct()!")) + + +def make_links(parent, selector=None): + """ + Attach an Atom element to the parent. + """ + + elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM, + selector=selector) + elem.set('rel') + elem.set('type') + elem.set('href') + + # Just for completeness... + return elem + + +def make_flat_dict(name, selector=None, subselector=None, ns=None): + """ + Utility for simple XML templates that traditionally used + XMLDictSerializer with no metadata. Returns a template element + where the top-level element has the given tag name, and where + sub-elements have tag names derived from the object's keys and + text derived from the object's values. This only works for flat + dictionary objects, not dictionaries containing nested lists or + dictionaries. + """ + + # Set up the names we need... 
diff --git a/cinder/api/sizelimit.py b/cinder/api/sizelimit.py
new file mode 100644
index 00000000000..3bde3bf8dc0
--- /dev/null
+++ b/cinder/api/sizelimit.py
@@ -0,0 +1,54 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 OpenStack, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Request body limiting middleware.
+
+"""
+
+import webob.dec
+import webob.exc
+
+from cinder import context
+from cinder import flags
+from cinder import log as logging
+from cinder.openstack.common import cfg
+from cinder import wsgi
+
+
+# Default maximum request body size is 112 KiB (114688 bytes)
+max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
+                                       default=114688,
+                                       help='Max size for body of a request')
+
+FLAGS = flags.FLAGS
+FLAGS.register_opt(max_request_body_size_opt)
+LOG = logging.getLogger(__name__)
+
+
+class RequestBodySizeLimiter(wsgi.Middleware):
+    """Reject requests whose body exceeds osapi_max_request_body_size."""
+
+    def __init__(self, *args, **kwargs):
+        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
+
+    @webob.dec.wsgify(RequestClass=wsgi.Request)
+    def __call__(self, req):
+        if (req.content_length > FLAGS.osapi_max_request_body_size
+                or len(req.body) > FLAGS.osapi_max_request_body_size):
+            msg = _("Request is too large.")
+            raise webob.exc.HTTPBadRequest(explanation=msg)
+        else:
+            return self.application
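+
+
+# NOTE(editor): editorial sketch, not part of the original patch.  It
+# wraps a trivial WSGI app in the limiter and posts an oversized body;
+# it assumes option defaults are in effect (in-tree, FLAGS may need to
+# be parsed first).
+if __name__ == '__main__':
+    import webob
+
+    @webob.dec.wsgify
+    def _fake_app(req):
+        return webob.Response('ok')
+
+    _req = webob.Request.blank('/')
+    _req.method = 'POST'
+    _req.body = 'x' * (114688 + 1)
+    # The middleware turns the oversized body into a 400 response
+    print _req.get_response(RequestBodySizeLimiter(_fake_app)).status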
diff --git a/cinder/common/__init__.py b/cinder/common/__init__.py
new file mode 100644
index 00000000000..0a3b98867a2
--- /dev/null
+++ b/cinder/common/__init__.py
@@ -0,0 +1,15 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/cinder/common/memorycache.py b/cinder/common/memorycache.py
new file mode 100644
index 00000000000..564526092d0
--- /dev/null
+++ b/cinder/common/memorycache.py
@@ -0,0 +1,64 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+#    Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Super simple fake memcache client."""
+
+from cinder import utils
+
+
+class Client(object):
+    """Replicates a tiny subset of the memcached client interface."""
+
+    def __init__(self, *args, **kwargs):
+        """Ignores the passed in args."""
+        self.cache = {}
+
+    def get(self, key):
+        """Retrieves the value for a key or None.
+
+        This expunges expired keys during each get.
+        """
+
+        for k in self.cache.keys():
+            (timeout, _value) = self.cache[k]
+            if timeout and utils.utcnow_ts() >= timeout:
+                del self.cache[k]
+
+        return self.cache.get(key, (0, None))[1]
+
+    def set(self, key, value, time=0, min_compress_len=0):
+        """Sets the value for a key."""
+        timeout = 0
+        if time != 0:
+            timeout = utils.utcnow_ts() + time
+        self.cache[key] = (timeout, value)
+        return True
+
+    def add(self, key, value, time=0, min_compress_len=0):
+        """Sets the value for a key if it doesn't exist."""
+        if self.get(key) is not None:
+            return False
+        return self.set(key, value, time, min_compress_len)
+
+    def incr(self, key, delta=1):
+        """Increments the value for a key."""
+        value = self.get(key)
+        if value is None:
+            return None
+        new_value = int(value) + delta
+        self.cache[key] = (self.cache[key][0], str(new_value))
+        return new_value
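+
+
+# NOTE(editor): editorial sketch, not part of the original patch,
+# exercising the memcache-compatible subset implemented above.
+if __name__ == '__main__':
+    _c = Client()
+    _c.set('instances', '41', time=60)         # expires in 60 seconds
+    assert _c.get('instances') == '41'
+    assert _c.add('instances', '0') is False   # key already present
+    assert _c.incr('instances', delta=1) == 42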
diff --git a/cinder/common/policy.py b/cinder/common/policy.py
new file mode 100644
index 00000000000..ec944a1ccb6
--- /dev/null
+++ b/cinder/common/policy.py
@@ -0,0 +1,222 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Common Policy Engine Implementation"""
+
+import json
+import urllib
+import urllib2
+
+
+class NotAuthorized(Exception):
+    pass
+
+
+_BRAIN = None
+
+
+def set_brain(brain):
+    """Set the brain used by enforce().
+
+    Defaults to Brain() if not set.
+
+    """
+    global _BRAIN
+    _BRAIN = brain
+
+
+def reset():
+    """Clear the brain used by enforce()."""
+    global _BRAIN
+    _BRAIN = None
+
+
+def enforce(match_list, target_dict, credentials_dict):
+    """Enforces authorization of some rules against credentials.
+
+    :param match_list: nested tuples of data to match against
+
+    The basic brain supports three types of match lists:
+
+    1) rules
+
+       looks like: ``('rule:compute:get_instance',)``
+
+       Retrieves the named rule from the rules dict and recursively
+       checks against the contents of the rule.
+
+    2) roles
+
+       looks like: ``('role:compute:admin',)``
+
+       Matches if the specified role is in credentials_dict['roles'].
+
+    3) generic
+
+       looks like: ``('tenant_id:%(tenant_id)s',)``
+
+       Substitutes values from the target dict into the match using
+       the % operator and matches them against the creds dict.
+
+    Combining rules:
+
+    The brain returns True if any of the outer tuple of rules
+    match and also True if all of the inner tuples match.  You
+    can use this to perform simple boolean logic.  For
+    example, the following rule would return True if the creds
+    contain the role 'admin' OR if the tenant_id matches the
+    target dict AND the creds contain the role
+    'compute_sysadmin':
+
+    ::
+
+        {
+            "rule:combined": (
+                'role:admin',
+                ('tenant_id:%(tenant_id)s', 'role:compute_sysadmin')
+            )
+        }
+
+    Note that rule and role are reserved words in the credentials match, so
+    you can't match against properties with those names.  Custom brains may
+    also add new reserved words.  For example, the HttpBrain adds http as a
+    reserved word.
+
+    :param target_dict: dict of object properties
+
+    Target dicts contain as much information as we can about the object being
+    operated on.
+
+    :param credentials_dict: dict of actor properties
+
+    Credentials dicts contain as much information as we can about the user
+    performing the action.
+
+    :raises NotAuthorized: if the check fails
+
+    """
+    global _BRAIN
+    if not _BRAIN:
+        _BRAIN = Brain()
+    if not _BRAIN.check(match_list, target_dict, credentials_dict):
+        raise NotAuthorized()
+
+
+class Brain(object):
+    """Implements policy checking."""
+
+    @classmethod
+    def load_json(cls, data, default_rule=None):
+        """Init a brain using json instead of a rules dictionary."""
+        rules_dict = json.loads(data)
+        return cls(rules=rules_dict, default_rule=default_rule)
+
+    def __init__(self, rules=None, default_rule=None):
+        self.rules = rules or {}
+        self.default_rule = default_rule
+
+    def add_rule(self, key, match):
+        self.rules[key] = match
+
+    def _check(self, match, target_dict, cred_dict):
+        match_kind, match_value = match.split(':', 1)
+        try:
+            f = getattr(self, '_check_%s' % match_kind)
+        except AttributeError:
+            if not self._check_generic(match, target_dict, cred_dict):
+                return False
+        else:
+            if not f(match_value, target_dict, cred_dict):
+                return False
+        return True
+
+    def check(self, match_list, target_dict, cred_dict):
+        """Checks authorization of some rules against credentials.
+
+        Detailed description of the check with examples in policy.enforce().
+
+        :param match_list: nested tuples of data to match against
+        :param target_dict: dict of object properties
+        :param cred_dict: dict of actor properties
+
+        :returns: True if the check passes
+
+        """
+        if not match_list:
+            return True
+        for and_list in match_list:
+            if isinstance(and_list, basestring):
+                and_list = (and_list,)
+            if all([self._check(item, target_dict, cred_dict)
+                    for item in and_list]):
+                return True
+        return False
+
+    def _check_rule(self, match, target_dict, cred_dict):
+        """Recursively checks credentials based on the brain's rules."""
+        try:
+            new_match_list = self.rules[match]
+        except KeyError:
+            if self.default_rule and match != self.default_rule:
+                new_match_list = ('rule:%s' % self.default_rule,)
+            else:
+                return False
+
+        return self.check(new_match_list, target_dict, cred_dict)
+
+    def _check_role(self, match, target_dict, cred_dict):
+        """Check that there is a matching role in the cred dict."""
+        return match.lower() in [x.lower() for x in cred_dict['roles']]
+
+    def _check_generic(self, match, target_dict, cred_dict):
+        """Check an individual match.
+
+        Matches look like:
+
+            tenant:%(tenant_id)s
+            role:compute:admin
+
+        """
+
+        # TODO(termie): do dict inspection via dot syntax
+        match = match % target_dict
+        key, value = match.split(':', 1)
+        if key in cred_dict:
+            return value == cred_dict[key]
+        return False
+
+
+class HttpBrain(Brain):
+    """A brain that can check external urls for policy.
+
+    Posts json blobs for target and credentials.
+
+    """
+
+    def _check_http(self, match, target_dict, cred_dict):
+        """Check http: rules by calling to a remote server.
+
+        This example implementation simply verifies that the response is
+        exactly 'True'.  A custom brain using response codes could easily
+        be implemented.
+
+        """
+        url = match % target_dict
+        data = {'target': json.dumps(target_dict),
+                'credentials': json.dumps(cred_dict)}
+        post_data = urllib.urlencode(data)
+        f = urllib2.urlopen(url, post_data)
+        return f.read() == "True"
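+
+
+# NOTE(editor): editorial sketch, not part of the original patch.  It
+# wires a Brain with one hypothetical rule and exercises both match
+# paths described in enforce()'s docstring: a generic tenant_id match,
+# and a failing check that raises NotAuthorized.
+if __name__ == '__main__':
+    _rules = {"volume:delete": (('role:admin',),
+                                ('tenant_id:%(tenant_id)s',))}
+    set_brain(Brain(rules=_rules))
+    # The owner passes via the generic tenant_id match...
+    enforce(('rule:volume:delete',), {'tenant_id': 't1'},
+            {'tenant_id': 't1', 'roles': []})
+    # ...while a non-admin stranger is rejected.
+    try:
+        enforce(('rule:volume:delete',), {'tenant_id': 't1'},
+                {'tenant_id': 't2', 'roles': []})
+        assert False, 'expected NotAuthorized'
+    except NotAuthorized:
+        pass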
diff --git a/cinder/compat/__init__.py b/cinder/compat/__init__.py
new file mode 100644
index 00000000000..8f085d939ff
--- /dev/null
+++ b/cinder/compat/__init__.py
@@ -0,0 +1,15 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/cinder/compat/flagfile.py b/cinder/compat/flagfile.py
new file mode 100644
index 00000000000..9690217b29d
--- /dev/null
+++ b/cinder/compat/flagfile.py
@@ -0,0 +1,188 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import os
+import shutil
+import tempfile
+
+'''
+Compatibility code for handling the deprecated --flagfile option.
+
+gflags style configuration files are deprecated and will be removed in
+the future.
+
+The code in this module translates --flagfile options into --config-file
+options and can be removed when support for --flagfile is removed.
+'''
+
+
+def _get_flagfile(argp):
+    '''Parse the filename from a --flagfile argument.
+
+    The current and next arguments are passed as a 2 item list.  If the
+    flagfile filename is in the next argument, the two arguments are
+    joined into the first item while the second item is set to None.
+    '''
+    i = argp[0].find('-flagfile')
+    if i < 0:
+        return None
+
+    # Accept -flagfile or --flagfile
+    if i != 0 and (i != 1 or argp[0][i] != '-'):
+        return None
+
+    i += len('-flagfile')
+    if i == len(argp[0]):  # Accept [-]-flagfile foo
+        argp[0] += '=' + argp[1]
+        argp[1] = None
+
+    if argp[0][i] != '=':  # Accept [-]-flagfile=foo
+        return None
+
+    return argp[0][i + 1:]
+
+
+def _open_file_for_reading(path):
+    '''Helper method which test code may stub out.'''
+    return open(path, 'r')
+
+
+def _open_fd_for_writing(fd, _path):
+    '''Helper method which test code may stub out.'''
+    return os.fdopen(fd, 'w')
+
+
+def _read_lines(flagfile):
+    '''Read a flag file, returning all lines with comments stripped.'''
+    with _open_file_for_reading(flagfile) as f:
+        lines = f.readlines()
+    ret = []
+    for l in lines:
+        if l.isspace() or l.startswith('#') or l.startswith('//'):
+            continue
+        ret.append(l.strip())
+    return ret
+
+
+def _read_flagfile(arg, next_arg, tempdir=None):
+    '''Convert a --flagfile argument to --config-file.
+
+    If the supplied argument is a --flagfile argument, read the contents
+    of the file and convert it to a .ini format config file.  Return a
+    --config-file argument with the converted file.
+
+    If the flag file contains more --flagfile arguments, multiple
+    --config-file arguments will be returned.
+
+    The returned argument list may also contain None values which should
+    be filtered out later.
+    '''
+    argp = [arg, next_arg]
+    flagfile = _get_flagfile(argp)
+    if not flagfile:
+        return argp
+
+    args = _read_lines(flagfile)
+
+    if args and not args[0].startswith('--'):
+        # This is a config file, not a flagfile, so return it.
+        return ['--config-file=' + flagfile] + argp[1:]
+
+    #
+    # We're recursing here to convert any --flagfile arguments
+    # read from this flagfile into --config-file arguments
+    #
+    # We don't actually include those --config-file arguments
+    # in the generated config file; instead we include all those
+    # --config-file args in the final command line
+    #
+    args = _iterate_args(args, _read_flagfile, tempdir=tempdir)
+
+    config_file_args = []
+
+    (fd, tmpconf) = tempfile.mkstemp(suffix='.conf', dir=tempdir)
+
+    with _open_fd_for_writing(fd, tmpconf) as f:
+        f.write('[DEFAULT]\n')
+        for arg in args:
+            if arg.startswith('--config-file='):
+                config_file_args.append(arg)
+                continue
+            if '=' in arg:
+                f.write(arg[2:] + '\n')
+            elif arg[2:].startswith('no'):
+                f.write(arg[4:] + '=false\n')
+            else:
+                f.write(arg[2:] + '=true\n')
+
+    return ['--config-file=' + tmpconf] + argp[1:] + config_file_args
+
+
+def _iterate_args(args, iterator, **kwargs):
+    '''Run an iterator function on the supplied args list.
+
+    The iterator is passed the current arg and next arg and returns a
+    list of args.  The returned args replace the supplied args in the
+    resulting args list.
+
+    The iterator will be passed None for the next arg when processing
+    the last arg.
+    '''
+    args.append(None)
+
+    ret = []
+    for i in range(len(args)):
+        if args[i] is None:  # last item, or consumed file name
+            continue
+
+        modified = iterator(args[i], args[i + 1], **kwargs)
+        args[i], args[i + 1] = modified[:2]
+
+        ret.extend(modified[:1] + modified[2:])  # don't append next arg
+
+    return filter(None, ret)
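+
+
+# NOTE(editor): editorial sketch, not part of the original patch,
+# showing the two --flagfile spellings _get_flagfile() accepts and its
+# argument-joining side effect on the passed 2 item list.
+if __name__ == '__main__':
+    assert (_get_flagfile(['--flagfile=/etc/cinder.conf', None]) ==
+            '/etc/cinder.conf')
+    _argp = ['--flagfile', '/etc/cinder.conf']
+    assert _get_flagfile(_argp) == '/etc/cinder.conf'
+    # The filename argument was joined in and consumed:
+    assert _argp == ['--flagfile=/etc/cinder.conf', None]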
+
+
+def handle_flagfiles(args, tempdir=None):
+    '''Replace --flagfile arguments with --config-file arguments.
+
+    Replace any --flagfile argument in the supplied list with a
+    --config-file argument containing a temporary config file with the
+    contents of the flag file translated to .ini format.
+
+    The tempdir argument is a directory which will be used to create
+    temporary files.
+    '''
+    return _iterate_args(args[:], _read_flagfile, tempdir=tempdir)
+
+
+@contextlib.contextmanager
+def handle_flagfiles_managed(args):
+    '''A context manager for handle_flagfiles() which removes temp files.
+
+    For use with the 'with' statement, i.e.::
+
+        with handle_flagfiles_managed(args) as args:
+            # Do stuff
+        # Any temporary files have been removed
+    '''
+    # NOTE(johannes): Would be nice to use utils.tempdir(), but it
+    # causes an import loop
+    tempdir = tempfile.mkdtemp(prefix='cinder-conf-')
+    try:
+        yield handle_flagfiles(args, tempdir=tempdir)
+    finally:
+        shutil.rmtree(tempdir)
diff --git a/cinder/compute/__init__.py b/cinder/compute/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/cinder/compute/aggregate_states.py b/cinder/compute/aggregate_states.py
new file mode 100644
index 00000000000..92e19402776
--- /dev/null
+++ b/cinder/compute/aggregate_states.py
@@ -0,0 +1,44 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Possible states for host aggregates.
+
+An aggregate may be 'created', in which case the admin has triggered its
+creation, but the underlying hypervisor pool has not actually been set up
+yet.  An aggregate may be 'changing', meaning that the underlying
+hypervisor pool is being set up.  An aggregate may be 'active', in which
+case the underlying hypervisor pool is up and running.  An aggregate may
+be 'dismissed' when it has no hosts and it has been deleted.  An aggregate
+may be in 'error' in all other cases.
+
+A 'created' aggregate becomes 'changing' during the first request to add
+a host.  While the status is 'changing' no other requests are accepted;
+this allows the hypervisor layer to instantiate the underlying pool
+without any potential race condition that may occur in master/slave-based
+configurations.  The aggregate goes into the 'active' state when the
+underlying pool has been correctly instantiated.
+
+All other operations (e.g. add/remove hosts) that succeed will keep the
+aggregate in the 'active' state.  If a number of consecutive requests
+fail, an 'active' aggregate goes into an 'error' state.  To recover from
+such a state, admin intervention is required.  Currently an error state
+is irreversible; that is, in order to recover from it an aggregate must
+be deleted.
+"""
+
+CREATED = 'created'
+CHANGING = 'changing'
+ACTIVE = 'active'
+ERROR = 'error'
+DISMISSED = 'dismissed'
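+
+# NOTE(editor): editorial summary, not part of the original patch.  The
+# lifecycle prose above reads as the following transition table; this
+# mapping is illustrative only and is not consumed by any code here.
+EXPECTED_TRANSITIONS = {
+    CREATED: [CHANGING],                  # first add-host request
+    CHANGING: [ACTIVE, ERROR],            # pool setup succeeds or fails
+    ACTIVE: [ACTIVE, ERROR, DISMISSED],   # steady state / failures / delete
+    ERROR: [DISMISSED],                   # irreversible; delete to recover
+}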
diff --git a/cinder/context.py b/cinder/context.py
new file mode 100644
index 00000000000..a9b5519c2ab
--- /dev/null
+++ b/cinder/context.py
@@ -0,0 +1,138 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2010 United States Government as represented by the
+#    Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""RequestContext: context for requests that persist through all of cinder."""
+
+import copy
+
+from cinder import log as logging
+from cinder.openstack.common import local
+from cinder import utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+def generate_request_id():
+    return 'req-' + str(utils.gen_uuid())
+
+
+class RequestContext(object):
+    """Security context and request information.
+
+    Represents the user taking a given action within the system.
+
+    """
+
+    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
+                 roles=None, remote_address=None, timestamp=None,
+                 request_id=None, auth_token=None, overwrite=True,
+                 quota_class=None, **kwargs):
+        """
+        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
+            indicates deleted records are visible, 'only' indicates that
+            *only* deleted records are visible.
+
+        :param overwrite: Set to False to ensure that the greenthread local
+            copy of the context is not overwritten.
+
+        :param kwargs: Extra arguments that might be present, but we ignore
+            because they possibly came in from older rpc messages.
+        """
+        if kwargs:
+            LOG.warn(_('Arguments dropped when creating context: %s') %
+                     str(kwargs))
+
+        self.user_id = user_id
+        self.project_id = project_id
+        self.roles = roles or []
+        self.is_admin = is_admin
+        if self.is_admin is None:
+            self.is_admin = 'admin' in [x.lower() for x in self.roles]
+        elif self.is_admin and 'admin' not in self.roles:
+            self.roles.append('admin')
+        self.read_deleted = read_deleted
+        self.remote_address = remote_address
+        if not timestamp:
+            timestamp = utils.utcnow()
+        if isinstance(timestamp, basestring):
+            timestamp = utils.parse_strtime(timestamp)
+        self.timestamp = timestamp
+        if not request_id:
+            request_id = generate_request_id()
+        self.request_id = request_id
+        self.auth_token = auth_token
+        self.quota_class = quota_class
+        if overwrite or not hasattr(local.store, 'context'):
+            self.update_store()
+
+    def _get_read_deleted(self):
+        return self._read_deleted
+
+    def _set_read_deleted(self, read_deleted):
+        if read_deleted not in ('no', 'yes', 'only'):
+            raise ValueError(_("read_deleted can only be one of 'no', "
+                               "'yes' or 'only', not %r") % read_deleted)
+        self._read_deleted = read_deleted
+
+    def _del_read_deleted(self):
+        del self._read_deleted
+
+    read_deleted = property(_get_read_deleted, _set_read_deleted,
+                            _del_read_deleted)
+
+    def update_store(self):
+        local.store.context = self
+
+    def to_dict(self):
+        return {'user_id': self.user_id,
+                'project_id': self.project_id,
+                'is_admin': self.is_admin,
+                'read_deleted': self.read_deleted,
+                'roles': self.roles,
+                'remote_address': self.remote_address,
+                'timestamp': utils.strtime(self.timestamp),
+                'request_id': self.request_id,
+                'auth_token': self.auth_token,
+                'quota_class': self.quota_class}
+
+    @classmethod
+    def from_dict(cls, values):
+        return cls(**values)
set.""" + context = copy.copy(self) + context.is_admin = True + + if 'admin' not in context.roles: + context.roles.append('admin') + + if read_deleted is not None: + context.read_deleted = read_deleted + + return context + + +def get_admin_context(read_deleted="no"): + return RequestContext(user_id=None, + project_id=None, + is_admin=True, + read_deleted=read_deleted, + overwrite=False) diff --git a/cinder/db/__init__.py b/cinder/db/__init__.py new file mode 100644 index 00000000000..f4eb417ec9f --- /dev/null +++ b/cinder/db/__init__.py @@ -0,0 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +DB abstraction for Cinder +""" + +from cinder.db.api import * diff --git a/cinder/db/api.py b/cinder/db/api.py new file mode 100644 index 00000000000..1e39531fda6 --- /dev/null +++ b/cinder/db/api.py @@ -0,0 +1,1335 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Defines interface for DB access. + +The underlying driver is loaded as a :class:`LazyPluggable`. + +Functions in this module are imported into the cinder.db namespace. Call these +functions from cinder.db namespace, not the cinder.db.api namespace. + +All functions in this module return objects that implement a dictionary-like +interface. Currently, many of these objects are sqlalchemy objects that +implement a dictionary interface. However, a future goal is to have all of +these objects be simple dictionaries. + + +**Related Flags** + +:db_backend: string to lookup in the list of LazyPluggable backends. + `sqlalchemy` is the only supported backend right now. + +:sql_connection: string specifying the sqlalchemy connection to use, like: + `sqlite:///var/lib/cinder/cinder.sqlite`. 
diff --git a/cinder/db/__init__.py b/cinder/db/__init__.py
new file mode 100644
index 00000000000..f4eb417ec9f
--- /dev/null
+++ b/cinder/db/__init__.py
@@ -0,0 +1,23 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+#    Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+DB abstraction for Cinder
+"""
+
+from cinder.db.api import *
diff --git a/cinder/db/api.py b/cinder/db/api.py
new file mode 100644
index 00000000000..1e39531fda6
--- /dev/null
+++ b/cinder/db/api.py
@@ -0,0 +1,1335 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+#    Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Defines interface for DB access.
+
+The underlying driver is loaded as a :class:`LazyPluggable`.
+
+Functions in this module are imported into the cinder.db namespace.  Call
+these functions from the cinder.db namespace, not the cinder.db.api
+namespace.
+
+All functions in this module return objects that implement a dictionary-like
+interface.  Currently, many of these objects are sqlalchemy objects that
+implement a dictionary interface.  However, a future goal is to have all of
+these objects be simple dictionaries.
+
+
+**Related Flags**
+
+:db_backend:  string to lookup in the list of LazyPluggable backends.
+              `sqlalchemy` is the only supported backend right now.
+
+:sql_connection:  string specifying the sqlalchemy connection to use, like:
+                  `sqlite:///var/lib/cinder/cinder.sqlite`.
+
+:enable_new_services:  when adding a new service to the database, is it in the
+                       pool of available hardware (Default: True)
+
+"""
+
+from cinder import exception
+from cinder import flags
+from cinder.openstack.common import cfg
+from cinder import utils
+
+
+db_opts = [
+    cfg.StrOpt('db_backend',
+               default='sqlalchemy',
+               help='The backend to use for db'),
+    cfg.BoolOpt('enable_new_services',
+                default=True,
+                help='Services to be added to the available pool on create'),
+    cfg.StrOpt('instance_name_template',
+               default='instance-%08x',
+               help='Template string to be used to generate instance names'),
+    cfg.StrOpt('volume_name_template',
+               default='volume-%s',
+               help='Template string to be used to generate volume names'),
+    cfg.StrOpt('snapshot_name_template',
+               default='snapshot-%s',
+               help='Template string to be used to generate snapshot names'),
+    ]
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(db_opts)
+
+IMPL = utils.LazyPluggable('db_backend',
+                           sqlalchemy='cinder.db.sqlalchemy.api')
+
+
+class NoMoreNetworks(exception.Error):
+    """No more available networks."""
+    pass
+
+
+class NoMoreTargets(exception.Error):
+    """No more available targets."""
+    pass
+
+
+###################
+
+
+def service_destroy(context, service_id):
+    """Destroy the service or raise if it does not exist."""
+    return IMPL.service_destroy(context, service_id)
+
+
+def service_get(context, service_id):
+    """Get a service or raise if it does not exist."""
+    return IMPL.service_get(context, service_id)
+
+
+def service_get_by_host_and_topic(context, host, topic):
+    """Get a service by the host it's on and the topic it listens to."""
+    return IMPL.service_get_by_host_and_topic(context, host, topic)
+
+
+def service_get_all(context, disabled=None):
+    """Get all services."""
+    return IMPL.service_get_all(context, disabled)
+
+
+def service_get_all_by_topic(context, topic):
+    """Get all services for a given topic."""
+    return IMPL.service_get_all_by_topic(context, topic)
+
+
+def service_get_all_by_host(context, host):
+    """Get all services for a given host."""
+    return IMPL.service_get_all_by_host(context, host)
+
+
+def service_get_all_compute_by_host(context, host):
+    """Get all compute services for a given host."""
+    return IMPL.service_get_all_compute_by_host(context, host)
+
+
+def service_get_all_compute_sorted(context):
+    """Get all compute services sorted by instance count.
+
+    :returns: a list of (Service, instance_count) tuples.
+
+    """
+    return IMPL.service_get_all_compute_sorted(context)
+
+
+def service_get_all_volume_sorted(context):
+    """Get all volume services sorted by volume count.
+
+    :returns: a list of (Service, volume_count) tuples.
+
+    """
+    return IMPL.service_get_all_volume_sorted(context)
+
+
+def service_get_by_args(context, host, binary):
+    """Get the state of a service by node name and binary."""
+    return IMPL.service_get_by_args(context, host, binary)
+
+
+def service_create(context, values):
+    """Create a service from the values dictionary."""
+    return IMPL.service_create(context, values)
+
+
+def service_update(context, service_id, values):
+    """Set the given properties on a service and update it.
+
+    Raises NotFound if service does not exist.
+
+    """
+    return IMPL.service_update(context, service_id, values)
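+
+
+# NOTE(editor): editorial aside, not part of the original patch.  Call
+# sites never import this module directly; they go through the cinder.db
+# namespace, and LazyPluggable resolves IMPL to cinder.db.sqlalchemy.api
+# on first use:
+#
+#     from cinder import context
+#     from cinder import db
+#
+#     ctxt = context.get_admin_context()
+#     services = db.service_get_all(ctxt, disabled=False)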
+ + """ + return IMPL.service_update(context, service_id, values) + + +################### + + +def compute_node_get(context, compute_id): + """Get an computeNode or raise if it does not exist.""" + return IMPL.compute_node_get(context, compute_id) + + +def compute_node_get_all(context): + """Get all computeNodes.""" + return IMPL.compute_node_get_all(context) + + +def compute_node_create(context, values): + """Create a computeNode from the values dictionary.""" + return IMPL.compute_node_create(context, values) + + +def compute_node_update(context, compute_id, values, auto_adjust=True): + """Set the given properties on an computeNode and update it. + + Raises NotFound if computeNode does not exist. + """ + return IMPL.compute_node_update(context, compute_id, values, auto_adjust) + + +def compute_node_get_by_host(context, host): + return IMPL.compute_node_get_by_host(context, host) + + +def compute_node_utilization_update(context, host, free_ram_mb_delta=0, + free_disk_gb_delta=0, work_delta=0, vm_delta=0): + return IMPL.compute_node_utilization_update(context, host, + free_ram_mb_delta, free_disk_gb_delta, work_delta, + vm_delta) + + +def compute_node_utilization_set(context, host, free_ram_mb=None, + free_disk_gb=None, work=None, vms=None): + return IMPL.compute_node_utilization_set(context, host, free_ram_mb, + free_disk_gb, work, vms) + +################### + + +def certificate_create(context, values): + """Create a certificate from the values dictionary.""" + return IMPL.certificate_create(context, values) + + +def certificate_get_all_by_project(context, project_id): + """Get all certificates for a project.""" + return IMPL.certificate_get_all_by_project(context, project_id) + + +def certificate_get_all_by_user(context, user_id): + """Get all certificates for a user.""" + return IMPL.certificate_get_all_by_user(context, user_id) + + +def certificate_get_all_by_user_and_project(context, user_id, project_id): + """Get all certificates for a user and project.""" + return IMPL.certificate_get_all_by_user_and_project(context, + user_id, + project_id) + + +################### + +def floating_ip_get(context, id): + return IMPL.floating_ip_get(context, id) + + +def floating_ip_get_pools(context): + """Returns a list of floating ip pools""" + return IMPL.floating_ip_get_pools(context) + + +def floating_ip_allocate_address(context, project_id, pool): + """Allocate free floating ip from specified pool and return the address. + + Raises if one is not available. + + """ + return IMPL.floating_ip_allocate_address(context, project_id, pool) + + +def floating_ip_create(context, values): + """Create a floating ip from the values dictionary.""" + return IMPL.floating_ip_create(context, values) + + +def floating_ip_count_by_project(context, project_id): + """Count floating ips used by project.""" + return IMPL.floating_ip_count_by_project(context, project_id) + + +def floating_ip_deallocate(context, address): + """Deallocate an floating ip by address.""" + return IMPL.floating_ip_deallocate(context, address) + + +def floating_ip_destroy(context, address): + """Destroy the floating_ip or raise if it does not exist.""" + return IMPL.floating_ip_destroy(context, address) + + +def floating_ip_disassociate(context, address): + """Disassociate an floating ip from a fixed ip by address. + + :returns: the address of the existing fixed ip. 
+ + """ + return IMPL.floating_ip_disassociate(context, address) + + +def floating_ip_fixed_ip_associate(context, floating_address, + fixed_address, host): + """Associate an floating ip to a fixed_ip by address.""" + return IMPL.floating_ip_fixed_ip_associate(context, + floating_address, + fixed_address, + host) + + +def floating_ip_get_all(context): + """Get all floating ips.""" + return IMPL.floating_ip_get_all(context) + + +def floating_ip_get_all_by_host(context, host): + """Get all floating ips by host.""" + return IMPL.floating_ip_get_all_by_host(context, host) + + +def floating_ip_get_all_by_project(context, project_id): + """Get all floating ips by project.""" + return IMPL.floating_ip_get_all_by_project(context, project_id) + + +def floating_ip_get_by_address(context, address): + """Get a floating ip by address or raise if it doesn't exist.""" + return IMPL.floating_ip_get_by_address(context, address) + + +def floating_ip_get_by_fixed_address(context, fixed_address): + """Get a floating ips by fixed address""" + return IMPL.floating_ip_get_by_fixed_address(context, fixed_address) + + +def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id): + """Get a floating ips by fixed address""" + return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id) + + +def floating_ip_update(context, address, values): + """Update a floating ip by address or raise if it doesn't exist.""" + return IMPL.floating_ip_update(context, address, values) + + +def floating_ip_set_auto_assigned(context, address): + """Set auto_assigned flag to floating ip""" + return IMPL.floating_ip_set_auto_assigned(context, address) + + +def dnsdomain_list(context): + """Get a list of all zones in our database, public and private.""" + return IMPL.dnsdomain_list(context) + + +def dnsdomain_register_for_zone(context, fqdomain, zone): + """Associated a DNS domain with an availability zone""" + return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone) + + +def dnsdomain_register_for_project(context, fqdomain, project): + """Associated a DNS domain with a project id""" + return IMPL.dnsdomain_register_for_project(context, fqdomain, project) + + +def dnsdomain_unregister(context, fqdomain): + """Purge associations for the specified DNS zone""" + return IMPL.dnsdomain_unregister(context, fqdomain) + + +def dnsdomain_get(context, fqdomain): + """Get the db record for the specified domain.""" + return IMPL.dnsdomain_get(context, fqdomain) + + +#################### + + +def migration_update(context, id, values): + """Update a migration instance.""" + return IMPL.migration_update(context, id, values) + + +def migration_create(context, values): + """Create a migration record.""" + return IMPL.migration_create(context, values) + + +def migration_get(context, migration_id): + """Finds a migration by the id.""" + return IMPL.migration_get(context, migration_id) + + +def migration_get_by_instance_and_status(context, instance_uuid, status): + """Finds a migration by the instance uuid its migrating.""" + return IMPL.migration_get_by_instance_and_status(context, instance_uuid, + status) + + +def migration_get_all_unconfirmed(context, confirm_window): + """Finds all unconfirmed migrations within the confirmation window.""" + return IMPL.migration_get_all_unconfirmed(context, confirm_window) + + +################### + + +def queue_get_for(context, topic, physical_node_id): + """Return a channel to send a message to a node with a topic.""" + return IMPL.queue_get_for(context, topic, physical_node_id) + + +################### + + +def 
+
+
+def iscsi_target_count_by_host(context, host):
+    """Return count of export devices."""
+    return IMPL.iscsi_target_count_by_host(context, host)
+
+
+def iscsi_target_create_safe(context, values):
+    """Create an iscsi_target from the values dictionary.
+
+    The device is not returned.  If the create violates the unique
+    constraints because the iscsi_target and host already exist,
+    no exception is raised.
+
+    """
+    return IMPL.iscsi_target_create_safe(context, values)
+
+
+###############
+
+
+def auth_token_destroy(context, token_id):
+    """Destroy an auth token."""
+    return IMPL.auth_token_destroy(context, token_id)
+
+
+def auth_token_get(context, token_hash):
+    """Retrieves a token given the hash representing it."""
+    return IMPL.auth_token_get(context, token_hash)
+
+
+def auth_token_update(context, token_hash, values):
+    """Updates a token given the hash representing it."""
+    return IMPL.auth_token_update(context, token_hash, values)
+
+
+def auth_token_create(context, token):
+    """Creates a new token."""
+    return IMPL.auth_token_create(context, token)
+
+
+###################
+
+
+def quota_create(context, project_id, resource, limit):
+    """Create a quota for the given project and resource."""
+    return IMPL.quota_create(context, project_id, resource, limit)
+
+
+def quota_get(context, project_id, resource):
+    """Retrieve a quota or raise if it does not exist."""
+    return IMPL.quota_get(context, project_id, resource)
+
+
+def quota_get_all_by_project(context, project_id):
+    """Retrieve all quotas associated with a given project."""
+    return IMPL.quota_get_all_by_project(context, project_id)
+
+
+def quota_update(context, project_id, resource, limit):
+    """Update a quota or raise if it does not exist."""
+    return IMPL.quota_update(context, project_id, resource, limit)
+
+
+def quota_destroy(context, project_id, resource):
+    """Destroy the quota or raise if it does not exist."""
+    return IMPL.quota_destroy(context, project_id, resource)
+
+
+def quota_destroy_all_by_project(context, project_id):
+    """Destroy all quotas associated with a given project."""
+    return IMPL.quota_destroy_all_by_project(context, project_id)
+
+
+###################
+
+
+def quota_class_create(context, class_name, resource, limit):
+    """Create a quota class for the given name and resource."""
+    return IMPL.quota_class_create(context, class_name, resource, limit)
+
+
+def quota_class_get(context, class_name, resource):
+    """Retrieve a quota class or raise if it does not exist."""
+    return IMPL.quota_class_get(context, class_name, resource)
+
+
+def quota_class_get_all_by_name(context, class_name):
+    """Retrieve all quotas associated with a given quota class."""
+    return IMPL.quota_class_get_all_by_name(context, class_name)
+
+
+def quota_class_update(context, class_name, resource, limit):
+    """Update a quota class or raise if it does not exist."""
+    return IMPL.quota_class_update(context, class_name, resource, limit)
+
+
+def quota_class_destroy(context, class_name, resource):
+    """Destroy the quota class or raise if it does not exist."""
+    return IMPL.quota_class_destroy(context, class_name, resource)
+
+
+def quota_class_destroy_all_by_name(context, class_name):
+    """Destroy all quotas associated with a given quota class."""
+    return IMPL.quota_class_destroy_all_by_name(context, class_name)
+
+
+###################
+
+
+def volume_allocate_iscsi_target(context, volume_id, host):
+    """Atomically allocate a free iscsi_target from the pool."""
+    return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
+ + +def volume_attached(context, volume_id, instance_id, mountpoint): + """Ensure that a volume is set as attached.""" + return IMPL.volume_attached(context, volume_id, instance_id, mountpoint) + + +def volume_create(context, values): + """Create a volume from the values dictionary.""" + return IMPL.volume_create(context, values) + + +def volume_data_get_for_project(context, project_id): + """Get (volume_count, gigabytes) for project.""" + return IMPL.volume_data_get_for_project(context, project_id) + + +def volume_destroy(context, volume_id): + """Destroy the volume or raise if it does not exist.""" + return IMPL.volume_destroy(context, volume_id) + + +def volume_detached(context, volume_id): + """Ensure that a volume is set as detached.""" + return IMPL.volume_detached(context, volume_id) + + +def volume_get(context, volume_id): + """Get a volume or raise if it does not exist.""" + return IMPL.volume_get(context, volume_id) + + +def volume_get_all(context): + """Get all volumes.""" + return IMPL.volume_get_all(context) + + +def volume_get_all_by_host(context, host): + """Get all volumes belonging to a host.""" + return IMPL.volume_get_all_by_host(context, host) + + +def volume_get_all_by_instance(context, instance_id): + """Get all volumes belonging to an instance.""" + return IMPL.volume_get_all_by_instance(context, instance_id) + + +def volume_get_all_by_project(context, project_id): + """Get all volumes belonging to a project.""" + return IMPL.volume_get_all_by_project(context, project_id) + + +def volume_get_instance(context, volume_id): + """Get the instance that a volume is attached to.""" + return IMPL.volume_get_instance(context, volume_id) + + +def volume_get_iscsi_target_num(context, volume_id): + """Get the target num (tid) allocated to the volume.""" + return IMPL.volume_get_iscsi_target_num(context, volume_id) + + +def volume_update(context, volume_id, values): + """Set the given properties on a volume and update it. + + Raises NotFound if volume does not exist. + + """ + return IMPL.volume_update(context, volume_id, values) + + +#################### + + +def snapshot_create(context, values): + """Create a snapshot from the values dictionary.""" + return IMPL.snapshot_create(context, values) + + +def snapshot_destroy(context, snapshot_id): + """Destroy the snapshot or raise if it does not exist.""" + return IMPL.snapshot_destroy(context, snapshot_id) + + +def snapshot_get(context, snapshot_id): + """Get a snapshot or raise if it does not exist.""" + return IMPL.snapshot_get(context, snapshot_id) + + +def snapshot_get_all(context): + """Get all snapshots.""" + return IMPL.snapshot_get_all(context) + + +def snapshot_get_all_by_project(context, project_id): + """Get all snapshots belonging to a project.""" + return IMPL.snapshot_get_all_by_project(context, project_id) + + +def snapshot_get_all_for_volume(context, volume_id): + """Get all snapshots for a volume.""" + return IMPL.snapshot_get_all_for_volume(context, volume_id) + + +def snapshot_update(context, snapshot_id, values): + """Set the given properties on a snapshot and update it. + + Raises NotFound if snapshot does not exist. + + """ + return IMPL.snapshot_update(context, snapshot_id, values)
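+
+# Example (illustrative values): update calls take a dict of column values
+# and raise NotFound for a missing row, e.g.:
+#     volume_update(ctxt, volume_id, {'status': 'available'})
+#     snapshot_update(ctxt, snapshot_id, {'progress': '100%'})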
+ + """ + return IMPL.snapshot_update(context, snapshot_id, values) + + +#################### + + +def block_device_mapping_create(context, values): + """Create an entry of block device mapping""" + return IMPL.block_device_mapping_create(context, values) + + +def block_device_mapping_update(context, bdm_id, values): + """Update an entry of block device mapping""" + return IMPL.block_device_mapping_update(context, bdm_id, values) + + +def block_device_mapping_update_or_create(context, values): + """Update an entry of block device mapping. + If not existed, create a new entry""" + return IMPL.block_device_mapping_update_or_create(context, values) + + +def block_device_mapping_get_all_by_instance(context, instance_uuid): + """Get all block device mapping belonging to a instance""" + return IMPL.block_device_mapping_get_all_by_instance(context, + instance_uuid) + + +def block_device_mapping_destroy(context, bdm_id): + """Destroy the block device mapping.""" + return IMPL.block_device_mapping_destroy(context, bdm_id) + + +def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, + volume_id): + """Destroy the block device mapping or raise if it does not exist.""" + return IMPL.block_device_mapping_destroy_by_instance_and_volume( + context, instance_uuid, volume_id) + + +#################### + + +def security_group_get_all(context): + """Get all security groups.""" + return IMPL.security_group_get_all(context) + + +def security_group_get(context, security_group_id): + """Get security group by its id.""" + return IMPL.security_group_get(context, security_group_id) + + +def security_group_get_by_name(context, project_id, group_name): + """Returns a security group with the specified name from a project.""" + return IMPL.security_group_get_by_name(context, project_id, group_name) + + +def security_group_get_by_project(context, project_id): + """Get all security groups belonging to a project.""" + return IMPL.security_group_get_by_project(context, project_id) + + +def security_group_get_by_instance(context, instance_id): + """Get security groups to which the instance is assigned.""" + return IMPL.security_group_get_by_instance(context, instance_id) + + +def security_group_exists(context, project_id, group_name): + """Indicates if a group name exists in a project.""" + return IMPL.security_group_exists(context, project_id, group_name) + + +def security_group_in_use(context, group_id): + """Indicates if a security group is currently in use.""" + return IMPL.security_group_in_use(context, group_id) + + +def security_group_create(context, values): + """Create a new security group.""" + return IMPL.security_group_create(context, values) + + +def security_group_destroy(context, security_group_id): + """Deletes a security group.""" + return IMPL.security_group_destroy(context, security_group_id) + + +def security_group_count_by_project(context, project_id): + """Count number of security groups in a project.""" + return IMPL.security_group_count_by_project(context, project_id) + + +#################### + + +def security_group_rule_create(context, values): + """Create a new security group.""" + return IMPL.security_group_rule_create(context, values) + + +def security_group_rule_get_by_security_group(context, security_group_id): + """Get all rules for a a given security group.""" + return IMPL.security_group_rule_get_by_security_group(context, + security_group_id) + + +def security_group_rule_get_by_security_group_grantee(context, + security_group_id): + """Get all rules that grant 
+ + +def security_group_rule_get_by_security_group_grantee(context, + security_group_id): + """Get all rules that grant access to the given security group.""" + return IMPL.security_group_rule_get_by_security_group_grantee(context, + security_group_id) + + +def security_group_rule_destroy(context, security_group_rule_id): + """Deletes a security group rule.""" + return IMPL.security_group_rule_destroy(context, security_group_rule_id) + + +def security_group_rule_get(context, security_group_rule_id): + """Gets a security group rule.""" + return IMPL.security_group_rule_get(context, security_group_rule_id) + + +def security_group_rule_count_by_group(context, security_group_id): + """Count rules in a given security group.""" + return IMPL.security_group_rule_count_by_group(context, security_group_id) + + +################### + + +def provider_fw_rule_create(context, rule): + """Add a firewall rule at the provider level (all hosts & instances).""" + return IMPL.provider_fw_rule_create(context, rule) + + +def provider_fw_rule_get_all(context): + """Get all provider-level firewall rules.""" + return IMPL.provider_fw_rule_get_all(context) + + +def provider_fw_rule_destroy(context, rule_id): + """Delete a provider firewall rule from the database.""" + return IMPL.provider_fw_rule_destroy(context, rule_id) + + +################### + + +def user_get(context, id): + """Get user by id.""" + return IMPL.user_get(context, id) + + +def user_get_by_uid(context, uid): + """Get user by uid.""" + return IMPL.user_get_by_uid(context, uid) + + +def user_get_by_access_key(context, access_key): + """Get user by access key.""" + return IMPL.user_get_by_access_key(context, access_key) + + +def user_create(context, values): + """Create a new user.""" + return IMPL.user_create(context, values) + + +def user_delete(context, id): + """Delete a user.""" + return IMPL.user_delete(context, id) + + +def user_get_all(context): + """Get all users.""" + return IMPL.user_get_all(context) + + +def user_add_role(context, user_id, role): + """Add another global role for user.""" + return IMPL.user_add_role(context, user_id, role) + + +def user_remove_role(context, user_id, role): + """Remove global role from user.""" + return IMPL.user_remove_role(context, user_id, role) + + +def user_get_roles(context, user_id): + """Get global roles for user.""" + return IMPL.user_get_roles(context, user_id) + + +def user_add_project_role(context, user_id, project_id, role): + """Add project role for user.""" + return IMPL.user_add_project_role(context, user_id, project_id, role) + + +def user_remove_project_role(context, user_id, project_id, role): + """Remove project role from user.""" + return IMPL.user_remove_project_role(context, user_id, project_id, role) + + +def user_get_roles_for_project(context, user_id, project_id): + """Return list of roles a user holds on project.""" + return IMPL.user_get_roles_for_project(context, user_id, project_id) + + +def user_update(context, user_id, values): + """Update user.""" + return IMPL.user_update(context, user_id, values) + + +################### + + +def project_get(context, id): + """Get project by id.""" + return IMPL.project_get(context, id) + + +def project_create(context, values): + """Create a new project.""" + return IMPL.project_create(context, values) + + +def project_add_member(context, project_id, user_id): + """Add user to project.""" + return IMPL.project_add_member(context, project_id, user_id) + + +def project_get_all(context): + """Get all projects.""" + return IMPL.project_get_all(context)
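+
+# Example (illustrative ids): membership is managed via the project calls,
+# e.g. project_add_member(ctxt, 'proj1', 'alice') adds a user, and
+# project_get_by_user(ctxt, 'alice') then lists 'proj1' among her projects.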
member.""" + return IMPL.project_get_by_user(context, user_id) + + +def project_remove_member(context, project_id, user_id): + """Remove the given user from the given project.""" + return IMPL.project_remove_member(context, project_id, user_id) + + +def project_update(context, project_id, values): + """Update Remove the given user from the given project.""" + return IMPL.project_update(context, project_id, values) + + +def project_delete(context, project_id): + """Delete project.""" + return IMPL.project_delete(context, project_id) + + +def project_get_networks(context, project_id, associate=True): + """Return the network associated with the project. + + If associate is true, it will attempt to associate a new + network if one is not found, otherwise it returns None. + + """ + return IMPL.project_get_networks(context, project_id, associate) + + +################### + + +def console_pool_create(context, values): + """Create console pool.""" + return IMPL.console_pool_create(context, values) + + +def console_pool_get(context, pool_id): + """Get a console pool.""" + return IMPL.console_pool_get(context, pool_id) + + +def console_pool_get_by_host_type(context, compute_host, proxy_host, + console_type): + """Fetch a console pool for a given proxy host, compute host, and type.""" + return IMPL.console_pool_get_by_host_type(context, + compute_host, + proxy_host, + console_type) + + +def console_pool_get_all_by_host_type(context, host, console_type): + """Fetch all pools for given proxy host and type.""" + return IMPL.console_pool_get_all_by_host_type(context, + host, + console_type) + + +def console_create(context, values): + """Create a console.""" + return IMPL.console_create(context, values) + + +def console_delete(context, console_id): + """Delete a console.""" + return IMPL.console_delete(context, console_id) + + +def console_get_by_pool_instance(context, pool_id, instance_id): + """Get console entry for a given instance and pool.""" + return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) + + +def console_get_all_by_instance(context, instance_id): + """Get consoles for a given instance.""" + return IMPL.console_get_all_by_instance(context, instance_id) + + +def console_get(context, console_id, instance_id=None): + """Get a specific console (possibly on a given instance).""" + return IMPL.console_get(context, console_id, instance_id) + + + ################## + + +def instance_type_create(context, values): + """Create a new instance type.""" + return IMPL.instance_type_create(context, values) + + +def instance_type_get_all(context, inactive=False, filters=None): + """Get all instance types.""" + return IMPL.instance_type_get_all( + context, inactive=inactive, filters=filters) + + +def instance_type_get(context, id): + """Get instance type by id.""" + return IMPL.instance_type_get(context, id) + + +def instance_type_get_by_name(context, name): + """Get instance type by name.""" + return IMPL.instance_type_get_by_name(context, name) + + +def instance_type_get_by_flavor_id(context, id): + """Get instance type by name.""" + return IMPL.instance_type_get_by_flavor_id(context, id) + + +def instance_type_destroy(context, name): + """Delete a instance type.""" + return IMPL.instance_type_destroy(context, name) + + +#################### + + +def cell_create(context, values): + """Create a new child Cell entry.""" + return IMPL.cell_create(context, values) + + +def cell_update(context, cell_id, values): + """Update a child Cell entry.""" + return IMPL.cell_update(context, cell_id, 
+ + +#################### + + +def cell_create(context, values): + """Create a new child Cell entry.""" + return IMPL.cell_create(context, values) + + +def cell_update(context, cell_id, values): + """Update a child Cell entry.""" + return IMPL.cell_update(context, cell_id, values) + + +def cell_delete(context, cell_id): + """Delete a child Cell.""" + return IMPL.cell_delete(context, cell_id) + + +def cell_get(context, cell_id): + """Get a specific child Cell.""" + return IMPL.cell_get(context, cell_id) + + +def cell_get_all(context): + """Get all child Cells.""" + return IMPL.cell_get_all(context) + + +#################### + + +def instance_metadata_get(context, instance_id): + """Get all metadata for an instance.""" + return IMPL.instance_metadata_get(context, instance_id) + + +def instance_metadata_delete(context, instance_id, key): + """Delete the given metadata item.""" + IMPL.instance_metadata_delete(context, instance_id, key) + + +def instance_metadata_update(context, instance_id, metadata, delete): + """Update metadata if it exists, otherwise create it.""" + IMPL.instance_metadata_update(context, instance_id, metadata, delete) + + +#################### + + +def agent_build_create(context, values): + """Create a new agent build entry.""" + return IMPL.agent_build_create(context, values) + + +def agent_build_get_by_triple(context, hypervisor, os, architecture): + """Get agent build by hypervisor/OS/architecture triple.""" + return IMPL.agent_build_get_by_triple(context, hypervisor, os, + architecture) + + +def agent_build_get_all(context): + """Get all agent builds.""" + return IMPL.agent_build_get_all(context) + + +def agent_build_destroy(context, agent_update_id): + """Destroy agent build entry.""" + IMPL.agent_build_destroy(context, agent_update_id) + + +def agent_build_update(context, agent_build_id, values): + """Update agent build entry.""" + IMPL.agent_build_update(context, agent_build_id, values) + + +#################### + + +def bw_usage_get_by_uuids(context, uuids, start_period): + """Return bw usages for instance(s) in a given audit period.""" + return IMPL.bw_usage_get_by_uuids(context, uuids, start_period) + + +def bw_usage_update(context, + uuid, + mac, + start_period, + bw_in, bw_out): + """Update cached bw usage for an instance and network. + Creates a new record if needed.""" + return IMPL.bw_usage_update(context, + uuid, + mac, + start_period, + bw_in, bw_out)
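+
+# Example (illustrative values): bw_usage_update() upserts one cached row
+# per (uuid, mac, start_period), e.g.:
+#     bw_usage_update(ctxt, instance_uuid, '02:16:3e:00:00:01',
+#                     audit_start, bw_in=1024, bw_out=2048)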
+ + +#################### + + +def instance_type_extra_specs_get(context, instance_type_id): + """Get all extra specs for an instance type.""" + return IMPL.instance_type_extra_specs_get(context, instance_type_id) + + +def instance_type_extra_specs_delete(context, instance_type_id, key): + """Delete the given extra specs item.""" + IMPL.instance_type_extra_specs_delete(context, instance_type_id, key) + + +def instance_type_extra_specs_update_or_create(context, instance_type_id, + extra_specs): + """Create or update instance type extra specs. This adds or modifies the + key/value pairs specified in the extra specs dict argument.""" + IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, + extra_specs) + + +################## + + +def volume_metadata_get(context, volume_id): + """Get all metadata for a volume.""" + return IMPL.volume_metadata_get(context, volume_id) + + +def volume_metadata_delete(context, volume_id, key): + """Delete the given metadata item.""" + IMPL.volume_metadata_delete(context, volume_id, key) + + +def volume_metadata_update(context, volume_id, metadata, delete): + """Update metadata if it exists, otherwise create it.""" + IMPL.volume_metadata_update(context, volume_id, metadata, delete) + + +################## + + +def volume_type_create(context, values): + """Create a new volume type.""" + return IMPL.volume_type_create(context, values) + + +def volume_type_get_all(context, inactive=False): + """Get all volume types.""" + return IMPL.volume_type_get_all(context, inactive) + + +def volume_type_get(context, id): + """Get volume type by id.""" + return IMPL.volume_type_get(context, id) + + +def volume_type_get_by_name(context, name): + """Get volume type by name.""" + return IMPL.volume_type_get_by_name(context, name) + + +def volume_type_destroy(context, name): + """Delete a volume type.""" + return IMPL.volume_type_destroy(context, name) + + +#################### + + +def volume_type_extra_specs_get(context, volume_type_id): + """Get all extra specs for a volume type.""" + return IMPL.volume_type_extra_specs_get(context, volume_type_id) + + +def volume_type_extra_specs_delete(context, volume_type_id, key): + """Delete the given extra specs item.""" + IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
+ + +def volume_type_extra_specs_update_or_create(context, volume_type_id, + extra_specs): + """Create or update volume type extra specs. This adds or modifies the + key/value pairs specified in the extra specs dict argument.""" + IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, + extra_specs) + + +################### + + +def s3_image_get(context, image_id): + """Find local s3 image represented by the provided id.""" + return IMPL.s3_image_get(context, image_id) + + +def s3_image_get_by_uuid(context, image_uuid): + """Find local s3 image represented by the provided uuid.""" + return IMPL.s3_image_get_by_uuid(context, image_uuid) + + +def s3_image_create(context, image_uuid): + """Create local s3 image represented by provided uuid.""" + return IMPL.s3_image_create(context, image_uuid) + + +#################### + + +def sm_backend_conf_create(context, values): + """Create a new SM Backend Config entry.""" + return IMPL.sm_backend_conf_create(context, values) + + +def sm_backend_conf_update(context, sm_backend_conf_id, values): + """Update a SM Backend Config entry.""" + return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values) + + +def sm_backend_conf_delete(context, sm_backend_conf_id): + """Delete a SM Backend Config.""" + return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id) + + +def sm_backend_conf_get(context, sm_backend_conf_id): + """Get a specific SM Backend Config.""" + return IMPL.sm_backend_conf_get(context, sm_backend_conf_id) + + +def sm_backend_conf_get_by_sr(context, sr_uuid): + """Get a SM Backend Config by storage repository uuid.""" + return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid) + + +def sm_backend_conf_get_all(context): + """Get all SM Backend Configs.""" + return IMPL.sm_backend_conf_get_all(context) + + +#################### + + +def sm_flavor_create(context, values): + """Create a new SM Flavor entry.""" + return IMPL.sm_flavor_create(context, values) + + +def sm_flavor_update(context, sm_flavor_id, values): + """Update a SM Flavor entry.""" + return IMPL.sm_flavor_update(context, sm_flavor_id, values) + + +def sm_flavor_delete(context, sm_flavor_id): + """Delete a SM Flavor.""" + return IMPL.sm_flavor_delete(context, sm_flavor_id) + + +def sm_flavor_get(context, sm_flavor): + """Get a specific SM Flavor.""" + return IMPL.sm_flavor_get(context, sm_flavor) + + +def sm_flavor_get_all(context): + """Get all SM Flavors.""" + return IMPL.sm_flavor_get_all(context) + + +#################### + + +def sm_volume_create(context, values): + """Create a new SM volume entry.""" + return IMPL.sm_volume_create(context, values) + + +def sm_volume_update(context, volume_id, values): + """Update a SM volume entry.""" + return IMPL.sm_volume_update(context, volume_id, values) + + +def sm_volume_delete(context, volume_id): + """Delete a SM volume.""" + return IMPL.sm_volume_delete(context, volume_id) + + +def sm_volume_get(context, volume_id): + """Get a specific SM volume.""" + return IMPL.sm_volume_get(context, volume_id) + + +def sm_volume_get_all(context): + """Get all SM volumes.""" + return IMPL.sm_volume_get_all(context) + + +#################### + + +def aggregate_create(context, values, metadata=None): + """Create a new aggregate with metadata.""" + return IMPL.aggregate_create(context, values, metadata) + + +def aggregate_get(context, aggregate_id): + """Get a specific aggregate by id.""" + return IMPL.aggregate_get(context, aggregate_id) + + +def aggregate_get_by_host(context, host): + """Get a specific aggregate by host.""" + return IMPL.aggregate_get_by_host(context, host)
+ + +def aggregate_update(context, aggregate_id, values): + """Update the attributes of an aggregate. If values contains a metadata + key, it updates the aggregate metadata too.""" + return IMPL.aggregate_update(context, aggregate_id, values) + + +def aggregate_delete(context, aggregate_id): + """Delete an aggregate.""" + return IMPL.aggregate_delete(context, aggregate_id) + + +def aggregate_get_all(context): + """Get all aggregates.""" + return IMPL.aggregate_get_all(context) + + +def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): + """Add/update metadata. If set_delete=True, keys missing from the + given metadata dict are removed.""" + IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete) + + +def aggregate_metadata_get(context, aggregate_id): + """Get metadata for the specified aggregate.""" + return IMPL.aggregate_metadata_get(context, aggregate_id) + + +def aggregate_metadata_delete(context, aggregate_id, key): + """Delete the given metadata key.""" + IMPL.aggregate_metadata_delete(context, aggregate_id, key) + + +def aggregate_host_add(context, aggregate_id, host): + """Add host to the aggregate.""" + IMPL.aggregate_host_add(context, aggregate_id, host) + + +def aggregate_host_get_all(context, aggregate_id): + """Get hosts for the specified aggregate.""" + return IMPL.aggregate_host_get_all(context, aggregate_id) + + +def aggregate_host_delete(context, aggregate_id, host): + """Delete the given host from the aggregate.""" + IMPL.aggregate_host_delete(context, aggregate_id, host) + + +#################### + + +def instance_fault_create(context, values): + """Create a new Instance Fault.""" + return IMPL.instance_fault_create(context, values) + + +def instance_fault_get_by_instance_uuids(context, instance_uuids): + """Get all instance faults for the provided instance_uuids.""" + return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids) diff --git a/cinder/db/base.py b/cinder/db/base.py new file mode 100644 index 00000000000..8b9d437c6dd --- /dev/null +++ b/cinder/db/base.py @@ -0,0 +1,40 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +"""Base class for classes that need modular database access.""" + +from cinder import flags +from cinder.openstack.common import cfg +from cinder.openstack.common import importutils + + +db_driver_opt = cfg.StrOpt('db_driver', + default='cinder.db', + help='driver to use for database access') + +FLAGS = flags.FLAGS +FLAGS.register_opt(db_driver_opt) + + +class Base(object): + """DB driver is injected in the init method.""" + + def __init__(self, db_driver=None): + if not db_driver: + db_driver = FLAGS.db_driver + self.db = importutils.import_module(db_driver) # pylint: disable=C0103 diff --git a/cinder/db/migration.py b/cinder/db/migration.py new file mode 100644 index 00000000000..87147ce926c --- /dev/null +++ b/cinder/db/migration.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Database setup and migration commands.""" + +from cinder import utils + + +IMPL = utils.LazyPluggable('db_backend', + sqlalchemy='cinder.db.sqlalchemy.migration') + + +def db_sync(version=None): + """Migrate the database to `version` or the most recent version.""" + return IMPL.db_sync(version=version) + + +def db_version(): + """Display the current database version.""" + return IMPL.db_version() diff --git a/cinder/db/sqlalchemy/__init__.py b/cinder/db/sqlalchemy/__init__.py new file mode 100644 index 00000000000..747015af53e --- /dev/null +++ b/cinder/db/sqlalchemy/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py new file mode 100644 index 00000000000..2d40b30467d --- /dev/null +++ b/cinder/db/sqlalchemy/api.py @@ -0,0 +1,1499 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of SQLAlchemy backend.""" + +import datetime +import functools +import warnings + +from cinder import db +from cinder import exception +from cinder import flags +from cinder import utils +from cinder import log as logging +from cinder.compute import aggregate_states +from cinder.db.sqlalchemy import models +from cinder.db.sqlalchemy.session import get_session +from sqlalchemy.exc import IntegrityError +from sqlalchemy.orm import joinedload +from sqlalchemy.sql import func +from sqlalchemy.sql.expression import literal_column + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) + + +def is_admin_context(context): + """Indicates if the request context is an administrator.""" + if not context: + warnings.warn(_('Use of empty request context is deprecated'), + DeprecationWarning) + raise Exception('die') + return context.is_admin + + +def is_user_context(context): + """Indicates if the request context is a normal user.""" + if not context: + return False + if context.is_admin: + return False + if not context.user_id or not context.project_id: + return False + return True + + +def authorize_project_context(context, project_id): + """Ensures a request has permission to access the given project.""" + if is_user_context(context): + if not context.project_id: + raise exception.NotAuthorized() + elif context.project_id != project_id: + raise exception.NotAuthorized() + + +def authorize_user_context(context, user_id): + """Ensures a request has permission to access the given user.""" + if is_user_context(context): + if not context.user_id: + raise exception.NotAuthorized() + elif context.user_id != user_id: + raise exception.NotAuthorized() + + +def authorize_quota_class_context(context, class_name): + """Ensures a request has permission to access the given quota class.""" + if is_user_context(context): + if not context.quota_class: + raise exception.NotAuthorized() + elif context.quota_class != class_name: + raise exception.NotAuthorized() + + +def require_admin_context(f): + """Decorator to require admin request context. + + The first argument to the wrapped function must be the context. + + """ + + def wrapper(*args, **kwargs): + if not is_admin_context(args[0]): + raise exception.AdminRequired() + return f(*args, **kwargs) + return wrapper + + +def require_context(f): + """Decorator to require *any* user or admin context. + + This does no authorization for user or project access matching, see + :py:func:`authorize_project_context` and + :py:func:`authorize_user_context`. + + The first argument to the wrapped function must be the context. + + """ + + def wrapper(*args, **kwargs): + if not is_admin_context(args[0]) and not is_user_context(args[0]): + raise exception.NotAuthorized() + return f(*args, **kwargs) + return wrapper + + +def require_volume_exists(f): + """Decorator to require the specified volume to exist. + + Requires the wrapped function to use context and volume_id as + their first two arguments. 
+ """ + + def wrapper(context, volume_id, *args, **kwargs): + db.volume_get(context, volume_id) + return f(context, volume_id, *args, **kwargs) + wrapper.__name__ = f.__name__ + return wrapper + + +def require_aggregate_exists(f): + """Decorator to require the specified aggregate to exist. + + Requires the wrapped function to use context and aggregate_id as + their first two arguments. + """ + + @functools.wraps(f) + def wrapper(context, aggregate_id, *args, **kwargs): + db.aggregate_get(context, aggregate_id) + return f(context, aggregate_id, *args, **kwargs) + return wrapper + + +def model_query(context, *args, **kwargs): + """Query helper that accounts for context's `read_deleted` field. + + :param context: context to query under + :param session: if present, the session to use + :param read_deleted: if present, overrides context's read_deleted field. + :param project_only: if present and context is user-type, then restrict + query to match the context's project_id. + """ + session = kwargs.get('session') or get_session() + read_deleted = kwargs.get('read_deleted') or context.read_deleted + project_only = kwargs.get('project_only') + + query = session.query(*args) + + if read_deleted == 'no': + query = query.filter_by(deleted=False) + elif read_deleted == 'yes': + pass # omit the filter to include deleted and active + elif read_deleted == 'only': + query = query.filter_by(deleted=True) + else: + raise Exception( + _("Unrecognized read_deleted value '%s'") % read_deleted) + + if project_only and is_user_context(context): + query = query.filter_by(project_id=context.project_id) + + return query + + +def exact_filter(query, model, filters, legal_keys): + """Applies exact match filtering to a query. + + Returns the updated query. Modifies filters argument to remove + filters consumed. + + :param query: query to apply filters to + :param model: model object the query applies to, for IN-style + filtering + :param filters: dictionary of filters; values that are lists, + tuples, sets, or frozensets cause an 'IN' test to + be performed, while exact matching ('==' operator) + is used for other values + :param legal_keys: list of keys to apply exact filtering to + """ + + filter_dict = {} + + # Walk through all the keys + for key in legal_keys: + # Skip ones we're not filtering on + if key not in filters: + continue + + # OK, filtering on this key; what value do we search for? 
+ value = filters.pop(key) + + if isinstance(value, (list, tuple, set, frozenset)): + # Looking for values in a list; apply to query directly + column_attr = getattr(model, key) + query = query.filter(column_attr.in_(value)) + else: + # OK, simple exact match; save for later + filter_dict[key] = value + + # Apply simple exact matches + if filter_dict: + query = query.filter_by(**filter_dict) + + return query + + +################### + + +@require_admin_context +def service_destroy(context, service_id): + session = get_session() + with session.begin(): + service_ref = service_get(context, service_id, session=session) + service_ref.delete(session=session) + + +@require_admin_context +def service_get(context, service_id, session=None): + result = model_query(context, models.Service, session=session).\ + filter_by(id=service_id).\ + first() + if not result: + raise exception.ServiceNotFound(service_id=service_id) + + return result + + +@require_admin_context +def service_get_all(context, disabled=None): + query = model_query(context, models.Service) + + if disabled is not None: + query = query.filter_by(disabled=disabled) + + return query.all() + + +@require_admin_context +def service_get_all_by_topic(context, topic): + return model_query(context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(topic=topic).\ + all() + + +@require_admin_context +def service_get_by_host_and_topic(context, host, topic): + return model_query(context, models.Service, read_deleted="no").\ + filter_by(disabled=False).\ + filter_by(host=host).\ + filter_by(topic=topic).\ + first() + + +@require_admin_context +def service_get_all_by_host(context, host): + return model_query(context, models.Service, read_deleted="no").\ + filter_by(host=host).\ + all() + + +@require_admin_context +def _service_get_all_topic_subquery(context, session, topic, subq, label): + sort_value = getattr(subq.c, label) + return model_query(context, models.Service, + func.coalesce(sort_value, 0), + session=session, read_deleted="no").\ + filter_by(topic=topic).\ + filter_by(disabled=False).\ + outerjoin((subq, models.Service.host == subq.c.host)).\ + order_by(sort_value).\ + all() + + +@require_admin_context +def service_get_all_volume_sorted(context): + session = get_session() + with session.begin(): + topic = 'volume' + label = 'volume_gigabytes' + subq = model_query(context, models.Volume.host, + func.sum(models.Volume.size).label(label), + session=session, read_deleted="no").\ + group_by(models.Volume.host).\ + subquery() + return _service_get_all_topic_subquery(context, + session, + topic, + subq, + label) + + +@require_admin_context +def service_get_by_args(context, host, binary): + result = model_query(context, models.Service).\ + filter_by(host=host).\ + filter_by(binary=binary).\ + first() + + if not result: + raise exception.HostBinaryNotFound(host=host, binary=binary) + + return result + + +@require_admin_context +def service_create(context, values): + service_ref = models.Service() + service_ref.update(values) + if not FLAGS.enable_new_services: + service_ref.disabled = True + service_ref.save() + return service_ref + + +@require_admin_context +def service_update(context, service_id, values): + session = get_session() + with session.begin(): + service_ref = service_get(context, service_id, session=session) + service_ref.update(values) + service_ref.save(session=session) + + +################### + + +def _metadata_refs(metadata_dict, meta_class): + metadata_refs = [] + if metadata_dict: + for k, v in 
metadata_dict.iteritems(): + metadata_ref = meta_class() + metadata_ref['key'] = k + metadata_ref['value'] = v + metadata_refs.append(metadata_ref) + return metadata_refs + + +def _dict_with_extra_specs(inst_type_query): + """Takes an instance, volume, or instance type query returned + by sqlalchemy and returns it as a dictionary, converting the + extra_specs entry from a list of dicts: + + 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] + + to a single dict: + + 'extra_specs' : {'k1': 'v1'} + + """ + inst_type_dict = dict(inst_type_query) + extra_specs = dict([(x['key'], x['value']) + for x in inst_type_query['extra_specs']]) + inst_type_dict['extra_specs'] = extra_specs + return inst_type_dict + + +################### + + +def queue_get_for(context, topic, physical_node_id): + # FIXME(ja): this should be servername? + return "%s.%s" % (topic, physical_node_id) + + +################### + + +@require_admin_context +def iscsi_target_count_by_host(context, host): + return model_query(context, models.IscsiTarget).\ + filter_by(host=host).\ + count() + + +@require_admin_context +def iscsi_target_create_safe(context, values): + iscsi_target_ref = models.IscsiTarget() + + for (key, value) in values.iteritems(): + iscsi_target_ref[key] = value + try: + iscsi_target_ref.save() + return iscsi_target_ref + except IntegrityError: + return None + + +################### + + +@require_context +def quota_get(context, project_id, resource, session=None): + result = model_query(context, models.Quota, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.ProjectQuotaNotFound(project_id=project_id) + + return result + + +@require_context +def quota_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + rows = model_query(context, models.Quota, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + result = {'project_id': project_id} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_admin_context +def quota_create(context, project_id, resource, limit): + quota_ref = models.Quota() + quota_ref.project_id = project_id + quota_ref.resource = resource + quota_ref.hard_limit = limit + quota_ref.save() + return quota_ref + + +@require_admin_context +def quota_update(context, project_id, resource, limit): + session = get_session() + with session.begin(): + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.hard_limit = limit + quota_ref.save(session=session) + + +@require_admin_context +def quota_destroy(context, project_id, resource): + session = get_session() + with session.begin(): + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.delete(session=session) + + +@require_admin_context +def quota_destroy_all_by_project(context, project_id): + session = get_session() + with session.begin(): + quotas = model_query(context, models.Quota, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + for quota_ref in quotas: + quota_ref.delete(session=session) + + +################### + + +@require_context +def quota_class_get(context, class_name, resource, session=None): + result = model_query(context, models.QuotaClass, session=session, + read_deleted="no").\ + filter_by(class_name=class_name).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.QuotaClassNotFound(class_name=class_name) + + return 
result + + +@require_context +def quota_class_get_all_by_name(context, class_name): + authorize_quota_class_context(context, class_name) + + rows = model_query(context, models.QuotaClass, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() + + result = {'class_name': class_name} + for row in rows: + result[row.resource] = row.hard_limit + + return result + + +@require_admin_context +def quota_class_create(context, class_name, resource, limit): + quota_class_ref = models.QuotaClass() + quota_class_ref.class_name = class_name + quota_class_ref.resource = resource + quota_class_ref.hard_limit = limit + quota_class_ref.save() + return quota_class_ref + + +@require_admin_context +def quota_class_update(context, class_name, resource, limit): + session = get_session() + with session.begin(): + quota_class_ref = quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.hard_limit = limit + quota_class_ref.save(session=session) + + +@require_admin_context +def quota_class_destroy(context, class_name, resource): + session = get_session() + with session.begin(): + quota_class_ref = quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.delete(session=session) + + +@require_admin_context +def quota_class_destroy_all_by_name(context, class_name): + session = get_session() + with session.begin(): + quota_classes = model_query(context, models.QuotaClass, + session=session, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() + + for quota_class_ref in quota_classes: + quota_class_ref.delete(session=session) + + +################### + + +@require_admin_context +def volume_allocate_iscsi_target(context, volume_id, host): + session = get_session() + with session.begin(): + iscsi_target_ref = model_query(context, models.IscsiTarget, + session=session, read_deleted="no").\ + filter_by(volume=None).\ + filter_by(host=host).\ + with_lockmode('update').\ + first() + + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + if not iscsi_target_ref: + raise db.NoMoreTargets() + + iscsi_target_ref.volume_id = volume_id + session.add(iscsi_target_ref) + + return iscsi_target_ref.target_num + + +@require_admin_context +def volume_attached(context, volume_id, instance_id, mountpoint): + session = get_session() + with session.begin(): + volume_ref = volume_get(context, volume_id, session=session) + volume_ref['status'] = 'in-use' + volume_ref['mountpoint'] = mountpoint + volume_ref['attach_status'] = 'attached' + volume_ref['instance_id'] = instance_id + volume_ref.save(session=session) + + +@require_context +def volume_create(context, values): + values['volume_metadata'] = _metadata_refs(values.get('metadata'), + models.VolumeMetadata) + volume_ref = models.Volume() + if not values.get('id'): + values['id'] = str(utils.gen_uuid()) + volume_ref.update(values) + + session = get_session() + with session.begin(): + volume_ref.save(session=session) + + return volume_ref + + +@require_admin_context +def volume_data_get_for_project(context, project_id): + result = model_query(context, + func.count(models.Volume.id), + func.sum(models.Volume.size), + read_deleted="no").\ + filter_by(project_id=project_id).\ + first() + + # NOTE(vish): convert None to 0 + return (result[0] or 0, result[1] or 0) + + +@require_admin_context +def volume_destroy(context, volume_id): + session = get_session() + with session.begin(): + session.query(models.Volume).\ + filter_by(id=volume_id).\ + update({'deleted': True, + 
'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + session.query(models.IscsiTarget).\ + filter_by(volume_id=volume_id).\ + update({'volume_id': None}) + session.query(models.VolumeMetadata).\ + filter_by(volume_id=volume_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_admin_context +def volume_detached(context, volume_id): + session = get_session() + with session.begin(): + volume_ref = volume_get(context, volume_id, session=session) + volume_ref['status'] = 'available' + volume_ref['mountpoint'] = None + volume_ref['attach_status'] = 'detached' + volume_ref.instance = None + volume_ref.save(session=session) + + +@require_context +def _volume_get_query(context, session=None, project_only=False): + return model_query(context, models.Volume, session=session, + project_only=project_only).\ + options(joinedload('instance')).\ + options(joinedload('volume_metadata')).\ + options(joinedload('volume_type')) + + +@require_context +def volume_get(context, volume_id, session=None): + result = _volume_get_query(context, session=session, project_only=True).\ + filter_by(id=volume_id).\ + first() + + if not result: + raise exception.VolumeNotFound(volume_id=volume_id) + + return result + + +@require_admin_context +def volume_get_all(context): + return _volume_get_query(context).all() + + +@require_admin_context +def volume_get_all_by_host(context, host): + return _volume_get_query(context).filter_by(host=host).all() + + +@require_admin_context +def volume_get_all_by_instance(context, instance_id): + result = model_query(context, models.Volume, read_deleted="no").\ + options(joinedload('volume_metadata')).\ + options(joinedload('volume_type')).\ + filter_by(instance_id=instance_id).\ + all() + + return result + + +@require_context +def volume_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + return _volume_get_query(context).filter_by(project_id=project_id).all() + + +@require_admin_context +def volume_get_instance(context, volume_id): + result = _volume_get_query(context).filter_by(id=volume_id).first() + + if not result: + raise exception.VolumeNotFound(volume_id=volume_id) + + return result.instance + + +@require_admin_context +def volume_get_iscsi_target_num(context, volume_id): + result = model_query(context, models.IscsiTarget, read_deleted="yes").\ + filter_by(volume_id=volume_id).\ + first() + + if not result: + raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) + + return result.target_num + + +@require_context +def volume_update(context, volume_id, values): + session = get_session() + metadata = values.get('metadata') + if metadata is not None: + volume_metadata_update(context, + volume_id, + values.pop('metadata'), + delete=True) + with session.begin(): + volume_ref = volume_get(context, volume_id, session=session) + volume_ref.update(values) + volume_ref.save(session=session) + + +#################### + +def _volume_metadata_get_query(context, volume_id, session=None): + return model_query(context, models.VolumeMetadata, + session=session, read_deleted="no").\ + filter_by(volume_id=volume_id) + + +@require_context +@require_volume_exists +def volume_metadata_get(context, volume_id): + rows = _volume_metadata_get_query(context, volume_id).all() + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +@require_context +@require_volume_exists +def volume_metadata_delete(context, volume_id, key): + 
_volume_metadata_get_query(context, volume_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +@require_volume_exists +def volume_metadata_get_item(context, volume_id, key, session=None): + result = _volume_metadata_get_query(context, volume_id, session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise exception.VolumeMetadataNotFound(metadata_key=key, + volume_id=volume_id) + return result + + +@require_context +@require_volume_exists +def volume_metadata_update(context, volume_id, metadata, delete): + session = get_session() + + # Set existing metadata to deleted if delete argument is True + if delete: + original_metadata = volume_metadata_get(context, volume_id) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = volume_metadata_get_item(context, volume_id, + meta_key, session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) + + meta_ref = None + + # Now update all existing items with new values, or create new meta objects + for meta_key, meta_value in metadata.iteritems(): + + # update the value whether it exists or not + item = {"value": meta_value} + + try: + meta_ref = volume_metadata_get_item(context, volume_id, + meta_key, session) + except exception.VolumeMetadataNotFound, e: + meta_ref = models.VolumeMetadata() + item.update({"key": meta_key, "volume_id": volume_id}) + + meta_ref.update(item) + meta_ref.save(session=session) + + return metadata + + +################### + + +@require_context +def snapshot_create(context, values): + snapshot_ref = models.Snapshot() + if not values.get('id'): + values['id'] = str(utils.gen_uuid()) + snapshot_ref.update(values) + + session = get_session() + with session.begin(): + snapshot_ref.save(session=session) + return snapshot_ref + + +@require_admin_context +def snapshot_destroy(context, snapshot_id): + session = get_session() + with session.begin(): + session.query(models.Snapshot).\ + filter_by(id=snapshot_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def snapshot_get(context, snapshot_id, session=None): + result = model_query(context, models.Snapshot, session=session, + project_only=True).\ + filter_by(id=snapshot_id).\ + first() + + if not result: + raise exception.SnapshotNotFound(snapshot_id=snapshot_id) + + return result + + +@require_admin_context +def snapshot_get_all(context): + return model_query(context, models.Snapshot).all() + + +@require_context +def snapshot_get_all_for_volume(context, volume_id): + return model_query(context, models.Snapshot, read_deleted='no', + project_only=True).\ + filter_by(volume_id=volume_id).all() + + +@require_context +def snapshot_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + return model_query(context, models.Snapshot).\ + filter_by(project_id=project_id).\ + all() + + +@require_context +def snapshot_update(context, snapshot_id, values): + session = get_session() + with session.begin(): + snapshot_ref = snapshot_get(context, snapshot_id, session=session) + snapshot_ref.update(values) + snapshot_ref.save(session=session) + + +################### + + +@require_admin_context +def migration_create(context, values): + migration = models.Migration() + migration.update(values) + migration.save() + return migration + + +@require_admin_context +def migration_update(context, 
id, values): + session = get_session() + with session.begin(): + migration = migration_get(context, id, session=session) + migration.update(values) + migration.save(session=session) + return migration + + +@require_admin_context +def migration_get(context, id, session=None): + result = model_query(context, models.Migration, session=session, + read_deleted="yes").\ + filter_by(id=id).\ + first() + + if not result: + raise exception.MigrationNotFound(migration_id=id) + + return result + + +@require_admin_context +def migration_get_by_instance_and_status(context, instance_uuid, status): + result = model_query(context, models.Migration, read_deleted="yes").\ + filter_by(instance_uuid=instance_uuid).\ + filter_by(status=status).\ + first() + + if not result: + raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, + status=status) + + return result + + +@require_admin_context +def migration_get_all_unconfirmed(context, confirm_window, session=None): + confirm_window = datetime.datetime.utcnow() - datetime.timedelta( + seconds=confirm_window) + + return model_query(context, models.Migration, session=session, + read_deleted="yes").\ + filter(models.Migration.updated_at <= confirm_window).\ + filter_by(status="finished").\ + all() + + +################## + + +@require_admin_context +def volume_type_create(context, values): + """Create a new volume type. In order to pass in extra specs, + the values dict should contain an 'extra_specs' key/value pair: + + {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} + + """ + session = get_session() + with session.begin(): + try: + volume_type_get_by_name(context, values['name'], session) + raise exception.VolumeTypeExists(name=values['name']) + except exception.VolumeTypeNotFoundByName: + pass + try: + specs = values.get('extra_specs') + + values['extra_specs'] = _metadata_refs(values.get('extra_specs'), + models.VolumeTypeExtraSpecs) + volume_type_ref = models.VolumeTypes() + volume_type_ref.update(values) + volume_type_ref.save() + except Exception, e: + raise exception.DBError(e) + return volume_type_ref
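+
+# Example (illustrative values): the values dict mirrors the docstring
+# above, e.g.:
+#     volume_type_create(ctxt, {'name': 'gold',
+#                               'extra_specs': {'qos': 'high'}})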
+ """ + filters = filters or {} + + read_deleted = "yes" if inactive else "no" + rows = model_query(context, models.VolumeTypes, + read_deleted=read_deleted).\ + options(joinedload('extra_specs')).\ + order_by("name").\ + all() + + # TODO(sirp): this patern of converting rows to a result with extra_specs + # is repeated quite a bit, might be worth creating a method for it + result = {} + for row in rows: + result[row['name']] = _dict_with_extra_specs(row) + + return result + + +@require_context +def volume_type_get(context, id, session=None): + """Returns a dict describing specific volume_type""" + result = model_query(context, models.VolumeTypes, session=session).\ + options(joinedload('extra_specs')).\ + filter_by(id=id).\ + first() + + if not result: + raise exception.VolumeTypeNotFound(volume_type_id=id) + + return _dict_with_extra_specs(result) + + +@require_context +def volume_type_get_by_name(context, name, session=None): + """Returns a dict describing specific volume_type""" + result = model_query(context, models.VolumeTypes, session=session).\ + options(joinedload('extra_specs')).\ + filter_by(name=name).\ + first() + + if not result: + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + else: + return _dict_with_extra_specs(result) + + +@require_admin_context +def volume_type_destroy(context, name): + session = get_session() + with session.begin(): + volume_type_ref = volume_type_get_by_name(context, name, + session=session) + volume_type_id = volume_type_ref['id'] + session.query(models.VolumeTypes).\ + filter_by(id=volume_type_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + session.query(models.VolumeTypeExtraSpecs).\ + filter_by(volume_type_id=volume_type_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +#################### + + +def _volume_type_extra_specs_query(context, volume_type_id, session=None): + return model_query(context, models.VolumeTypeExtraSpecs, session=session, + read_deleted="no").\ + filter_by(volume_type_id=volume_type_id) + + +@require_context +def volume_type_extra_specs_get(context, volume_type_id): + rows = _volume_type_extra_specs_query(context, volume_type_id).\ + all() + + result = {} + for row in rows: + result[row['key']] = row['value'] + + return result + + +@require_context +def volume_type_extra_specs_delete(context, volume_type_id, key): + _volume_type_extra_specs_query(context, volume_type_id).\ + filter_by(key=key).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def volume_type_extra_specs_get_item(context, volume_type_id, key, + session=None): + result = _volume_type_extra_specs_query( + context, volume_type_id, session=session).\ + filter_by(key=key).\ + first() + + if not result: + raise exception.VolumeTypeExtraSpecsNotFound( + extra_specs_key=key, volume_type_id=volume_type_id) + + return result + + +@require_context +def volume_type_extra_specs_update_or_create(context, volume_type_id, + specs): + session = get_session() + spec_ref = None + for key, value in specs.iteritems(): + try: + spec_ref = volume_type_extra_specs_get_item( + context, volume_type_id, key, session) + except exception.VolumeTypeExtraSpecsNotFound, e: + spec_ref = models.VolumeTypeExtraSpecs() + spec_ref.update({"key": key, "value": value, + "volume_type_id": volume_type_id, + "deleted": 0}) + spec_ref.save(session=session) + return specs + 
+
+####################
+
+
+@require_admin_context
+def sm_backend_conf_create(context, values):
+    backend_conf = models.SMBackendConf()
+    backend_conf.update(values)
+    backend_conf.save()
+    return backend_conf
+
+
+@require_admin_context
+def sm_backend_conf_update(context, sm_backend_id, values):
+    session = get_session()
+    with session.begin():
+        backend_conf = model_query(context, models.SMBackendConf,
+                                   session=session,
+                                   read_deleted="yes").\
+                                   filter_by(id=sm_backend_id).\
+                                   first()
+
+        if not backend_conf:
+            raise exception.NotFound(
+                _("No backend config with id %(sm_backend_id)s") % locals())
+
+        backend_conf.update(values)
+        backend_conf.save(session=session)
+    return backend_conf
+
+
+@require_admin_context
+def sm_backend_conf_delete(context, sm_backend_id):
+    # FIXME(sirp): for consistency, shouldn't this just mark the record as
+    # deleted, with `purge` doing the actual delete?
+    session = get_session()
+    with session.begin():
+        model_query(context, models.SMBackendConf, session=session,
+                    read_deleted="yes").\
+                    filter_by(id=sm_backend_id).\
+                    delete()
+
+
+@require_admin_context
+def sm_backend_conf_get(context, sm_backend_id):
+    result = model_query(context, models.SMBackendConf, read_deleted="yes").\
+                    filter_by(id=sm_backend_id).\
+                    first()
+
+    if not result:
+        raise exception.NotFound(_("No backend config with id "
+                                   "%(sm_backend_id)s") % locals())
+
+    return result
+
+
+@require_admin_context
+def sm_backend_conf_get_by_sr(context, sr_uuid):
+    return model_query(context, models.SMBackendConf, read_deleted="yes").\
+                    filter_by(sr_uuid=sr_uuid).\
+                    first()
+
+
+@require_admin_context
+def sm_backend_conf_get_all(context):
+    return model_query(context, models.SMBackendConf, read_deleted="yes").\
+                    all()
+
+
+####################
+
+
+def _sm_flavor_get_query(context, sm_flavor_label, session=None):
+    return model_query(context, models.SMFlavors, session=session,
+                       read_deleted="yes").\
+                       filter_by(label=sm_flavor_label)
+
+
+@require_admin_context
+def sm_flavor_create(context, values):
+    sm_flavor = models.SMFlavors()
+    sm_flavor.update(values)
+    sm_flavor.save()
+    return sm_flavor
+
+
+@require_admin_context
+def sm_flavor_update(context, sm_flavor_label, values):
+    sm_flavor = sm_flavor_get(context, sm_flavor_label)
+    sm_flavor.update(values)
+    sm_flavor.save()
+    return sm_flavor
+
+
+@require_admin_context
+def sm_flavor_delete(context, sm_flavor_label):
+    session = get_session()
+    with session.begin():
+        _sm_flavor_get_query(context, sm_flavor_label,
+                             session=session).delete()
+
+
+@require_admin_context
+def sm_flavor_get(context, sm_flavor_label):
+    result = _sm_flavor_get_query(context, sm_flavor_label).first()
+
+    if not result:
+        raise exception.NotFound(
+            _("No sm_flavor called %(sm_flavor_label)s") % locals())
+
+    return result
+
+
+@require_admin_context
+def sm_flavor_get_all(context):
+    return model_query(context, models.SMFlavors, read_deleted="yes").all()
+
+
+###############################
+
+
+def _sm_volume_get_query(context, volume_id, session=None):
+    return model_query(context, models.SMVolume, session=session,
+                       read_deleted="yes").\
+                       filter_by(id=volume_id)
+
+
+def sm_volume_create(context, values):
+    sm_volume = models.SMVolume()
+    sm_volume.update(values)
+    sm_volume.save()
+    return sm_volume
+
+
+def sm_volume_update(context, volume_id, values):
+    sm_volume = sm_volume_get(context, volume_id)
+    sm_volume.update(values)
+    sm_volume.save()
+    return sm_volume
+
+
+def sm_volume_delete(context, volume_id):
+    session = get_session()
+    with 
session.begin(): + _sm_volume_get_query(context, volume_id, session=session).delete() + + +def sm_volume_get(context, volume_id): + result = _sm_volume_get_query(context, volume_id).first() + + if not result: + raise exception.NotFound( + _("No sm_volume with id %(volume_id)s") % locals()) + + return result + + +def sm_volume_get_all(context): + return model_query(context, models.SMVolume, read_deleted="yes").all() + + +################ + + +def _aggregate_get_query(context, model_class, id_field, id, + session=None, read_deleted=None): + return model_query(context, model_class, session=session, + read_deleted=read_deleted).filter(id_field == id) + + +@require_admin_context +def aggregate_create(context, values, metadata=None): + session = get_session() + aggregate = _aggregate_get_query(context, + models.Aggregate, + models.Aggregate.name, + values['name'], + session=session, + read_deleted='yes').first() + values.setdefault('operational_state', aggregate_states.CREATED) + if not aggregate: + aggregate = models.Aggregate() + aggregate.update(values) + aggregate.save(session=session) + elif aggregate.deleted: + values['deleted'] = False + values['deleted_at'] = None + aggregate.update(values) + aggregate.save(session=session) + else: + raise exception.AggregateNameExists(aggregate_name=values['name']) + if metadata: + aggregate_metadata_add(context, aggregate.id, metadata) + return aggregate + + +@require_admin_context +def aggregate_get(context, aggregate_id): + aggregate = _aggregate_get_query(context, + models.Aggregate, + models.Aggregate.id, + aggregate_id).first() + + if not aggregate: + raise exception.AggregateNotFound(aggregate_id=aggregate_id) + + return aggregate + + +@require_admin_context +def aggregate_get_by_host(context, host): + aggregate_host = _aggregate_get_query(context, + models.AggregateHost, + models.AggregateHost.host, + host).first() + + if not aggregate_host: + raise exception.AggregateHostNotFound(host=host) + + return aggregate_get(context, aggregate_host.aggregate_id) + + +@require_admin_context +def aggregate_update(context, aggregate_id, values): + session = get_session() + aggregate = _aggregate_get_query(context, + models.Aggregate, + models.Aggregate.id, + aggregate_id, + session=session).first() + if aggregate: + metadata = values.get('metadata') + if metadata is not None: + aggregate_metadata_add(context, + aggregate_id, + values.pop('metadata'), + set_delete=True) + with session.begin(): + aggregate.update(values) + aggregate.save(session=session) + values['metadata'] = metadata + return aggregate + else: + raise exception.AggregateNotFound(aggregate_id=aggregate_id) + + +@require_admin_context +def aggregate_delete(context, aggregate_id): + query = _aggregate_get_query(context, + models.Aggregate, + models.Aggregate.id, + aggregate_id) + if query.first(): + query.update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'operational_state': aggregate_states.DISMISSED, + 'updated_at': literal_column('updated_at')}) + else: + raise exception.AggregateNotFound(aggregate_id=aggregate_id) + + +@require_admin_context +def aggregate_get_all(context): + return model_query(context, models.Aggregate).all() + + +@require_admin_context +@require_aggregate_exists +def aggregate_metadata_get(context, aggregate_id): + rows = model_query(context, + models.AggregateMetadata).\ + filter_by(aggregate_id=aggregate_id).all() + + return dict([(r['key'], r['value']) for r in rows]) + + +@require_admin_context +@require_aggregate_exists +def 
aggregate_metadata_delete(context, aggregate_id, key): + query = _aggregate_get_query(context, + models.AggregateMetadata, + models.AggregateMetadata.aggregate_id, + aggregate_id).\ + filter_by(key=key) + if query.first(): + query.update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + else: + raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id, + metadata_key=key) + + +@require_admin_context +@require_aggregate_exists +def aggregate_metadata_get_item(context, aggregate_id, key, session=None): + result = _aggregate_get_query(context, + models.AggregateMetadata, + models.AggregateMetadata.aggregate_id, + aggregate_id, session=session, + read_deleted='yes').\ + filter_by(key=key).first() + + if not result: + raise exception.AggregateMetadataNotFound(metadata_key=key, + aggregate_id=aggregate_id) + + return result + + +@require_admin_context +@require_aggregate_exists +def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): + session = get_session() + + if set_delete: + original_metadata = aggregate_metadata_get(context, aggregate_id) + for meta_key, meta_value in original_metadata.iteritems(): + if meta_key not in metadata: + meta_ref = aggregate_metadata_get_item(context, aggregate_id, + meta_key, session) + meta_ref.update({'deleted': True}) + meta_ref.save(session=session) + + meta_ref = None + + for meta_key, meta_value in metadata.iteritems(): + item = {"value": meta_value} + try: + meta_ref = aggregate_metadata_get_item(context, aggregate_id, + meta_key, session) + if meta_ref.deleted: + item.update({'deleted': False, 'deleted_at': None}) + except exception.AggregateMetadataNotFound: + meta_ref = models.AggregateMetadata() + item.update({"key": meta_key, "aggregate_id": aggregate_id}) + + meta_ref.update(item) + meta_ref.save(session=session) + + return metadata + + +@require_admin_context +@require_aggregate_exists +def aggregate_host_get_all(context, aggregate_id): + rows = model_query(context, + models.AggregateHost).\ + filter_by(aggregate_id=aggregate_id).all() + + return [r.host for r in rows] + + +@require_admin_context +@require_aggregate_exists +def aggregate_host_delete(context, aggregate_id, host): + query = _aggregate_get_query(context, + models.AggregateHost, + models.AggregateHost.aggregate_id, + aggregate_id).filter_by(host=host) + if query.first(): + query.update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + else: + raise exception.AggregateHostNotFound(aggregate_id=aggregate_id, + host=host) + + +@require_admin_context +@require_aggregate_exists +def aggregate_host_add(context, aggregate_id, host): + session = get_session() + host_ref = _aggregate_get_query(context, + models.AggregateHost, + models.AggregateHost.aggregate_id, + aggregate_id, + session=session, + read_deleted='yes').\ + filter_by(host=host).first() + if not host_ref: + try: + host_ref = models.AggregateHost() + values = {"host": host, "aggregate_id": aggregate_id, } + host_ref.update(values) + host_ref.save(session=session) + except exception.DBError: + raise exception.AggregateHostConflict(host=host) + elif host_ref.deleted: + host_ref.update({'deleted': False, 'deleted_at': None}) + host_ref.save(session=session) + else: + raise exception.AggregateHostExists(host=host, + aggregate_id=aggregate_id) + return host_ref diff --git a/cinder/db/sqlalchemy/migrate_repo/README b/cinder/db/sqlalchemy/migrate_repo/README new file mode 100644 index 00000000000..6218f8cac42 
--- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/README @@ -0,0 +1,4 @@ +This is a database migration repository. + +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/cinder/db/sqlalchemy/migrate_repo/__init__.py b/cinder/db/sqlalchemy/migrate_repo/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/db/sqlalchemy/migrate_repo/manage.py b/cinder/db/sqlalchemy/migrate_repo/manage.py new file mode 100644 index 00000000000..09e340f44f9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/manage.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +from migrate.versioning.shell import main +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/cinder/db/sqlalchemy/migrate_repo/migrate.cfg b/cinder/db/sqlalchemy/migrate_repo/migrate.cfg new file mode 100644 index 00000000000..10c685c0e50 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=cinder + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/001_austin.py b/cinder/db/sqlalchemy/migrate_repo/versions/001_austin.py new file mode 100644 index 00000000000..2e21685f8f5 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -0,0 +1,627 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +## Table code mostly autogenerated by genmodel.py +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import ForeignKeyConstraint, Integer, MetaData, String +from sqlalchemy import Table, Text +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # + # New Tables + # + auth_tokens = Table('auth_tokens', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('token_hash', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('user_id', Integer()), + Column('server_manageent_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('storage_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('cdn_management_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + export_devices = Table('export_devices', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('shelf_id', Integer()), + Column('blade_id', Integer()), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + fixed_ips = Table('fixed_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('network_id', + Integer(), + ForeignKey('networks.id'), + nullable=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('allocated', Boolean(create_constraint=True, name=None)), + Column('leased', Boolean(create_constraint=True, name=None)), + Column('reserved', Boolean(create_constraint=True, name=None)), + ) + + floating_ips = Table('floating_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fixed_ip_id', + Integer(), + ForeignKey('fixed_ips.id'), + nullable=True), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + instances = Table('instances', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('internal_id', Integer()), + 
Column('admin_pass', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('image_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('kernel_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('ramdisk_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('server_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('launch_index', Integer()), + Column('key_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('key_data', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('state', Integer()), + Column('state_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('memory_mb', Integer()), + Column('vcpus', Integer()), + Column('local_gb', Integer()), + Column('hostname', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_data', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('reservation_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('mac_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + key_pairs = Table('key_pairs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fingerprint', + String(length=255, convert_unicode=False, + assert_unicode=None, + 
unicode_error=None, _warn_on_bytestring=False)), + Column('public_key', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + networks = Table('networks', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('injected', Boolean(create_constraint=True, name=None)), + Column('cidr', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('netmask', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('bridge', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('gateway', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('broadcast', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dns', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vlan', Integer()), + Column('vpn_public_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vpn_public_port', Integer()), + Column('vpn_private_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dhcp_start', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + projects = Table('projects', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_manager', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id')), + ) + + quotas = Table('quotas', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instances', Integer()), + 
Column('cores', Integer()), + Column('volumes', Integer()), + Column('gigabytes', Integer()), + Column('floating_ips', Integer()), + ) + + security_groups = Table('security_groups', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + security_group_inst_assoc = Table('security_group_instance_association', + meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('security_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('instance_id', Integer(), ForeignKey('instances.id')), + ) + + security_group_rules = Table('security_group_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('parent_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('protocol', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('group_id', + Integer(), + ForeignKey('security_groups.id')), + ) + + services = Table('services', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('binary', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('topic', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('report_count', Integer(), nullable=False), + Column('disabled', Boolean(create_constraint=True, name=None)), + ) + + users = Table('users', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=255, convert_unicode=False, + assert_unicode=None, + 
unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('access_key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('secret_key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('is_admin', Boolean(create_constraint=True, name=None)), + ) + + user_project_association = Table('user_project_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('projects.id'), + primary_key=True, + nullable=False), + ) + + user_project_role_association = Table('user_project_role_association', + meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('role', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ForeignKeyConstraint(['user_id', + 'project_id'], + ['user_project_association.user_id', + 'user_project_association.project_id']), + ) + + user_role_association = Table('user_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('role', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ) + + volumes = Table('volumes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('ec2_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + 
assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('size', Integer()), + Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('mountpoint', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_time', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + tables = [auth_tokens, + instances, key_pairs, networks, fixed_ips, floating_ips, + quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, projects, + user_project_association, user_project_role_association, + user_role_association, volumes, export_devices] + + for table in tables: + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=tables) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
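+    # The tables are reflected from the live schema (autoload=True) rather
+    # than redeclared, so the drops match whatever the database actually
+    # contains.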
+ meta = MetaData() + meta.bind = migrate_engine + + auth_tokens = Table('auth_tokens', meta, autoload=True) + export_devices = Table('export_devices', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + floating_ips = Table('floating_ips', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + key_pairs = Table('key_pairs', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + projects = Table('projects', meta, autoload=True) + quotas = Table('quotas', meta, autoload=True) + security_groups = Table('security_groups', meta, autoload=True) + security_group_inst_assoc = Table('security_group_instance_association', + meta, autoload=True) + security_group_rules = Table('security_group_rules', meta, autoload=True) + services = Table('services', meta, autoload=True) + users = Table('users', meta, autoload=True) + user_project_association = Table('user_project_association', meta, + autoload=True) + user_project_role_association = Table('user_project_role_association', + meta, + autoload=True) + user_role_association = Table('user_role_association', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + + # table order matters, don't change + for table in (auth_tokens, export_devices, floating_ips, fixed_ips, + key_pairs, networks, + quotas, security_group_inst_assoc, + security_group_rules, security_groups, services, + user_project_role_association, user_project_association, + user_role_association, + projects, users, volumes, instances): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/cinder/db/sqlalchemy/migrate_repo/versions/002_bexar.py new file mode 100644 index 00000000000..ba1576b7b87 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -0,0 +1,236 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String, Table, Text +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + instances = Table('instances', meta, autoload=True) + services = Table('services', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + auth_tokens = Table('auth_tokens', meta, autoload=True) + + # + # New Tables + # + certificates = Table('certificates', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('file_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + consoles = Table('consoles', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', Integer()), + Column('password', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('port', Integer(), nullable=True), + Column('pool_id', + Integer(), + ForeignKey('console_pools.id')), + ) + + console_pools = Table('console_pools', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('console_type', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('public_hostname', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('compute_host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + instance_actions = Table('instance_actions', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), 
primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id')), + Column('action', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('error', + Text(length=None, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + iscsi_targets = Table('iscsi_targets', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('target_num', Integer()), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + tables = [certificates, console_pools, consoles, instance_actions, + iscsi_targets] + for table in tables: + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=tables) + raise + + auth_tokens.c.user_id.alter(type=String(length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + # + # New Columns + # + instances_availability_zone = Column( + 'availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + instances_locked = Column('locked', + Boolean(create_constraint=True, name=None)) + + networks_cidr_v6 = Column( + 'cidr_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + networks_ra_server = Column( + 'ra_server', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + services_availability_zone = Column( + 'availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + instances.create_column(instances_availability_zone) + instances.create_column(instances_locked) + networks.create_column(networks_cidr_v6) + networks.create_column(networks_ra_server) + services.create_column(services_availability_zone) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + volumes = Table('volumes', meta, autoload=True) + + instances = Table('instances', meta, autoload=True) + services = Table('services', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + auth_tokens = Table('auth_tokens', meta, autoload=True) + + certificates = Table('certificates', meta, autoload=True) + consoles = Table('consoles', meta, autoload=True) + console_pools = Table('console_pools', meta, autoload=True) + instance_actions = Table('instance_actions', meta, autoload=True) + iscsi_targets = Table('iscsi_targets', meta, autoload=True) + + # table order matters, don't change + tables = [certificates, consoles, console_pools, instance_actions, + iscsi_targets] + for table in tables: + table.drop() + + auth_tokens.c.user_id.alter(type=Integer()) + + instances.drop_column('availability_zone') + instances.drop_column('locked') + networks.drop_column('cidr_v6') + networks.drop_column('ra_server') + services.drop_column('availability_zone') diff --git 
a/cinder/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql new file mode 100644 index 00000000000..cf5c1a20854 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/002_postgresql_downgrade.sql @@ -0,0 +1,20 @@ +BEGIN; + + DROP TABLE certificates; + DROP TABLE consoles; + DROP TABLE console_pools; + DROP TABLE instance_actions; + DROP TABLE iscsi_targets; + + ALTER TABLE auth_tokens ADD COLUMN user_id_backup INTEGER; + UPDATE auth_tokens SET user_id_backup = CAST(user_id AS INTEGER); + ALTER TABLE auth_tokens DROP COLUMN user_id; + ALTER TABLE auth_tokens RENAME COLUMN user_id_backup TO user_id; + + ALTER TABLE instances DROP COLUMN availability_zone; + ALTER TABLE instances DROP COLUMN locked; + ALTER TABLE networks DROP COLUMN cidr_v6; + ALTER TABLE networks DROP COLUMN ra_server; + ALTER TABLE services DROP COLUMN availability_zone; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql new file mode 100644 index 00000000000..8c6a5becaac --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/002_sqlite_downgrade.sql @@ -0,0 +1,388 @@ +BEGIN TRANSACTION; + + DROP TABLE certificates; + + DROP TABLE console_pools; + + DROP TABLE consoles; + + DROP TABLE instance_actions; + + DROP TABLE iscsi_targets; + + CREATE TEMPORARY TABLE auth_tokens_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + token_hash VARCHAR(255) NOT NULL, + user_id VARCHAR(255), + server_manageent_url VARCHAR(255), + storage_url VARCHAR(255), + cdn_management_url VARCHAR(255), + PRIMARY KEY (token_hash), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO auth_tokens_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + token_hash, + user_id, + server_manageent_url, + storage_url, + cdn_management_url + FROM auth_tokens; + + DROP TABLE auth_tokens; + + CREATE TABLE auth_tokens ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + token_hash VARCHAR(255) NOT NULL, + user_id INTEGER, + server_manageent_url VARCHAR(255), + storage_url VARCHAR(255), + cdn_management_url VARCHAR(255), + PRIMARY KEY (token_hash), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO auth_tokens + SELECT created_at, + updated_at, + deleted_at, + deleted, + token_hash, + user_id, + server_manageent_url, + storage_url, + cdn_management_url + FROM auth_tokens_backup; + + DROP TABLE auth_tokens_backup; + + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + admin_pass VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + image_id VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + state INTEGER, + state_description VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + instance_type VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + mac_address VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)) + ); + 
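+    -- SQLite's ALTER TABLE cannot drop columns, so the instances table is
+    -- rebuilt below: rows are copied into instances_backup, the table is
+    -- recreated without the new columns, and the rows are copied back.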
+ INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_id, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + vcpus, + local_gb, + hostname, + host, + instance_type, + user_data, + reservation_id, + mac_address, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + admin_pass VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + image_id VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + state INTEGER, + state_description VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + instance_type VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + mac_address VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_id, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + vcpus, + local_gb, + hostname, + host, + instance_type, + user_data, + reservation_id, + mac_address, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description + FROM instances_backup; + + DROP TABLE instances_backup; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + 
updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host + FROM networks_backup; + + DROP TABLE networks_backup; + + CREATE TEMPORARY TABLE services_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + host VARCHAR(255), + binary VARCHAR(255), + topic VARCHAR(255), + report_count INTEGER NOT NULL, + disabled BOOLEAN, + availability_zone VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (disabled IN (0, 1)) + ); + + INSERT INTO services_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + host, + binary, + topic, + report_count, + disabled, + availability_zone + FROM services; + + DROP TABLE services; + + CREATE TABLE services ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + host VARCHAR(255), + binary VARCHAR(255), + topic VARCHAR(255), + report_count INTEGER NOT NULL, + disabled BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (disabled IN (0, 1)) + ); + + INSERT INTO services + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + host, + binary, + topic, + report_count, + disabled + FROM services_backup; + + DROP TABLE services_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/cinder/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py new file mode 100644 index 00000000000..668b77f0ffc --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks_label = Column( + 'label', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + networks.create_column(networks_label) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks.drop_column('label') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql new file mode 100644 index 00000000000..01601cac07b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql @@ -0,0 +1,111 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + label VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server, + label + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server + FROM networks_backup; + + DROP TABLE networks_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/cinder/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py new file mode 100644 index 00000000000..e46d9d44300 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py @@ -0,0 +1,66 @@ +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # + # New Tables + # + zones = Table('zones', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('api_url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + for table in (zones, ): + try: + table.create() + except Exception: + LOG.info(repr(table)) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + for table in (zones, ): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/cinder/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py new file mode 100644 index 00000000000..4eb66111a06 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + quotas = Table('quotas', meta, autoload=True) + + instance_metadata_table = Table('instance_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + for table in (instance_metadata_table, ): + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + raise + + quota_metadata_items = Column('metadata_items', Integer()) + quotas.create_column(quota_metadata_items) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + quotas = Table('quotas', meta, autoload=True) + + instance_metadata_table = Table('instance_metadata', meta, autoload=True) + + for table in (instance_metadata_table, ): + table.drop() + + quotas.drop_column('metadata_items') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/cinder/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py new file mode 100644 index 00000000000..df2be9df44c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py @@ -0,0 +1,54 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + # Add columns to existing tables + volumes_provider_location = Column('provider_location', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + volumes_provider_auth = Column('provider_auth', + String(length=256, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + volumes.create_column(volumes_provider_location) + volumes.create_column(volumes_provider_auth) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + volumes.drop_column('provider_location') + volumes.drop_column('provider_auth') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql new file mode 100644 index 00000000000..f55c284379a --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql @@ -0,0 +1,113 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE volumes_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO volumes_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth + FROM volumes; + + DROP TABLE volumes; + + CREATE TABLE volumes ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO volumes + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description + FROM volumes_backup; + + DROP TABLE volumes_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py 
b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py new file mode 100644 index 00000000000..d84fa173400 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py @@ -0,0 +1,70 @@ +# Copyright 2011 OpenStack LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + + # + # New Columns + # + fixed_ips_addressV6 = Column( + "addressV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_netmaskV6 = Column( + "netmaskV6", + String( + length=3, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_gatewayV6 = Column( + "gatewayV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + # Add columns to existing tables + fixed_ips.create_column(fixed_ips_addressV6) + fixed_ips.create_column(fixed_ips_netmaskV6) + fixed_ips.create_column(fixed_ips_gatewayV6) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + + fixed_ips.drop_column('addressV6') + fixed_ips.drop_column('netmaskV6') + fixed_ips.drop_column('gatewayV6') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql new file mode 100644 index 00000000000..44d34769820 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/007_sqlite_downgrade.sql @@ -0,0 +1,79 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN DEFAULT FALSE, + leased BOOLEAN DEFAULT FALSE, + reserved BOOLEAN DEFAULT FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + addressV6 VARCHAR(255), + netmaskV6 VARCHAR(3), + gatewayV6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (leased IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (deleted IN (0, 1)), + CHECK (reserved IN (0, 1)) + ); + + INSERT INTO fixed_ips_backup + SELECT id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted, + addressV6, + netmaskV6, + gatewayV6 + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN DEFAULT FALSE, + leased BOOLEAN DEFAULT FALSE, + reserved BOOLEAN DEFAULT FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + 
deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + CHECK (leased IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (deleted IN (0, 1)), + CHECK (reserved IN (0, 1)) + ); + + INSERT INTO fixed_ips + SELECT id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/cinder/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py new file mode 100644 index 00000000000..98e53862827 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Ken Pepple +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here + # Don't create your own engine; bind migrate_engine + # to your metadata + meta = MetaData() + meta.bind = migrate_engine + # + # New Tables + # + instance_types = Table('instance_types', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('id', Integer(), primary_key=True, nullable=False), + Column('memory_mb', Integer(), nullable=False), + Column('vcpus', Integer(), nullable=False), + Column('local_gb', Integer(), nullable=False), + Column('flavorid', Integer(), nullable=False, unique=True), + Column('swap', Integer(), nullable=False, default=0), + Column('rxtx_quota', Integer(), nullable=False, default=0), + Column('rxtx_cap', Integer(), nullable=False, default=0)) + try: + instance_types.create() + except Exception: + LOG.info(repr(instance_types)) + LOG.exception('Exception while creating instance_types table') + raise + + # Here are the old static instance types + INSTANCE_TYPES = { + 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1), + 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2), + 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3), + 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4), + 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)} + try: + i = instance_types.insert() + for name, values in INSTANCE_TYPES.iteritems(): + # FIXME(kpepple) should we be seeding created_at / updated_at ? 
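+ # (if seeding were added, each execute() below would also pass + # created_at/updated_at values, e.g. the commented 'now' timestamp below)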
+ # now = datetime.datetime.utcnow() + i.execute({'name': name, 'memory_mb': values["memory_mb"], + 'vcpus': values["vcpus"], 'deleted': False, + 'local_gb': values["local_gb"], + 'flavorid': values["flavorid"]}) + except Exception: + LOG.info(repr(instance_types)) + LOG.exception('Exception while seeding instance_types table') + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + instance_types = Table('instance_types', meta, autoload=True) + for table in (instance_types, ): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/cinder/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py new file mode 100644 index 00000000000..acedd3ad022 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py @@ -0,0 +1,70 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + # + # New Tables + # + migrations = Table('migrations', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('source_compute', String(255)), + Column('dest_compute', String(255)), + Column('dest_host', String(255)), + Column('instance_id', Integer, ForeignKey('instances.id'), + nullable=True), + Column('status', String(255)), + ) + + for table in (migrations, ): + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + migrations = Table('migrations', meta, autoload=True) + + for table in (migrations, ): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py new file mode 100644 index 00000000000..da01940bd39 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances_os_type = Column('os_type', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + instances.create_column(instances_os_type) + migrate_engine.execute(instances.update()\ + .where(instances.c.os_type == None)\ + .values(os_type='linux')) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('os_type') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/011_live_migration.py b/cinder/db/sqlalchemy/migrate_repo/versions/011_live_migration.py new file mode 100644 index 00000000000..c2a3560a390 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/011_live_migration.py @@ -0,0 +1,85 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData +from sqlalchemy import Table, Text +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + compute_nodes = Table('compute_nodes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('service_id', Integer(), nullable=False), + + Column('vcpus', Integer(), nullable=False), + Column('memory_mb', Integer(), nullable=False), + Column('local_gb', Integer(), nullable=False), + Column('vcpus_used', Integer(), nullable=False), + Column('memory_mb_used', Integer(), nullable=False), + Column('local_gb_used', Integer(), nullable=False), + Column('hypervisor_type', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('hypervisor_version', Integer(), nullable=False), + Column('cpu_info', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + ) + + try: + compute_nodes.create() + except Exception: + LOG.info(repr(compute_nodes)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=[compute_nodes]) + raise + + instances_launched_on = Column( + 'launched_on', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + instances.create_column(instances_launched_on) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + compute_nodes = Table('compute_nodes', meta, autoload=True) + + compute_nodes.drop() + + instances.drop_column('launched_on') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py b/cinder/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py new file mode 100644 index 00000000000..a626d2c7dbd --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py @@ -0,0 +1,90 @@ +# Copyright (c) 2011 NTT. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + + # Alter column name + networks.c.ra_server.alter(name='gateway_v6') + # Add new column to existing table + networks_netmask_v6 = Column( + 'netmask_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + networks.create_column(networks_netmask_v6) + + # drop existing columns from table + fixed_ips.c.addressV6.drop() + fixed_ips.c.netmaskV6.drop() + fixed_ips.c.gatewayV6.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + + networks.c.gateway_v6.alter(name='ra_server') + networks.drop_column('netmask_v6') + + fixed_ips_addressV6 = Column( + "addressV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_netmaskV6 = Column( + "netmaskV6", + String( + length=3, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + fixed_ips_gatewayV6 = Column( + "gatewayV6", + String( + length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + for column in (fixed_ips_addressV6, + fixed_ips_netmaskV6, + fixed_ips_gatewayV6): + fixed_ips.create_column(column) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql new file mode 100644 index 00000000000..0779f50e8a9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/012_sqlite_upgrade.sql @@ -0,0 +1,195 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + ra_server VARCHAR(255), + label VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server, + label + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + 
host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + ra_server AS gateway_v6, + label, + NULL AS netmask_v6 + FROM networks_backup; + + DROP TABLE networks_backup; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + addressV6 VARCHAR(255), + netmaskV6 VARCHAR(3), + gatewayV6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + addressV6, + netmaskV6, + gatewayV6 + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py b/cinder/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py new file mode 100644 index 00000000000..d8735ec7df9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + migrations = Table('migrations', meta, autoload=True) + + old_flavor_id = Column('old_flavor_id', Integer()) + new_flavor_id = Column('new_flavor_id', Integer()) + + migrations.create_column(old_flavor_id) + migrations.create_column(new_flavor_id) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + migrations = Table('migrations', meta, autoload=True) + + migrations.drop_column('old_flavor_id') + migrations.drop_column('new_flavor_id') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql new file mode 100644 index 00000000000..fbba364beab --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql @@ -0,0 +1,69 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE migrations_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + source_compute VARCHAR(255), + dest_compute VARCHAR(255), + dest_host VARCHAR(255), + instance_id INTEGER, + status VARCHAR(255), + old_flavor_id INTEGER, + new_flavor_id INTEGER, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO migrations_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + source_compute, + dest_compute, + dest_host, + instance_id, + status, + old_flavor_id, + new_flavor_id + FROM migrations; + + DROP TABLE migrations; + + CREATE TABLE migrations ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + source_compute VARCHAR(255), + dest_compute VARCHAR(255), + dest_host VARCHAR(255), + instance_id INTEGER, + status VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO migrations + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + source_compute, + dest_compute, + dest_host, + instance_id, + status + FROM migrations_backup; + + DROP TABLE migrations_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py new file mode 100644 index 00000000000..b363caca5e4 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + + c_instance_type_id = Column('instance_type_id', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + instances.create_column(c_instance_type_id) + + type_names = {} + recs = migrate_engine.execute(instance_types.select()) + for row in recs: + type_names[row[0]] = row[1] + + for type_id, type_name in type_names.iteritems(): + migrate_engine.execute(instances.update()\ + .where(instances.c.instance_type == type_name)\ + .values(instance_type_id=type_id)) + + instances.c.instance_type.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + + c_instance_type = Column('instance_type', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + instances.create_column(c_instance_type) + + type_names = {} + recs = migrate_engine.execute(instance_types.select()) + for row in recs: + type_names[row[0]] = row[1] + + for type_id, type_name in type_names.iteritems(): + migrate_engine.execute(instances.update()\ + .where(instances.c.instance_type_id == type_id)\ + .values(instance_type=type_name)) + + instances.c.instance_type_id.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/cinder/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py new file mode 100644 index 00000000000..51db850665e --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Grid Dynamics +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, MetaData, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + floating_ips = Table('floating_ips', meta, autoload=True) + c_auto_assigned = Column('auto_assigned', Boolean, default=False) + floating_ips.create_column(c_auto_assigned) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + floating_ips = Table('floating_ips', meta, autoload=True) + floating_ips.drop_column('auto_assigned') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql new file mode 100644 index 00000000000..c599ef2b355 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/015_sqlite_downgrade.sql @@ -0,0 +1,62 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE floating_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (auto_assigned IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host, + auto_assigned + FROM floating_ips; + + DROP TABLE floating_ips; + + CREATE TABLE floating_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host + FROM floating_ips_backup; + + DROP TABLE floating_ips_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/cinder/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py new file mode 100644 index 00000000000..1c7081c4ad6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -0,0 +1,213 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
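+ +# NOTE: this migration converts the wide quotas table (one column per +# resource) into key/value rows (project_id, resource, hard_limit); the +# helpers below define both schemas so rows can be copied either way.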
+ +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table + +from cinder import utils + +resources = [ + 'instances', + 'cores', + 'volumes', + 'gigabytes', + 'floating_ips', + 'metadata_items', +] + + +def old_style_quotas_table(meta, name): + return Table(name, meta, + Column('id', Integer(), primary_key=True), + Column('created_at', DateTime(), + default=utils.utcnow), + Column('updated_at', DateTime(), + onupdate=utils.utcnow), + Column('deleted_at', DateTime()), + Column('deleted', Boolean(), default=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)), + Column('instances', Integer()), + Column('cores', Integer()), + Column('volumes', Integer()), + Column('gigabytes', Integer()), + Column('floating_ips', Integer()), + Column('metadata_items', Integer()), + ) + + +def new_style_quotas_table(meta, name): + return Table(name, meta, + Column('id', Integer(), primary_key=True), + Column('created_at', DateTime(), + default=utils.utcnow), + Column('updated_at', DateTime(), + onupdate=utils.utcnow), + Column('deleted_at', DateTime()), + Column('deleted', Boolean(), default=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)), + Column('resource', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=False), + Column('hard_limit', Integer(), nullable=True), + ) + + +def quotas_table(meta, name='quotas'): + return Table(name, meta, autoload=True) + + +def _assert_no_duplicate_project_ids(quotas): + project_ids = set() + message = ('There are multiple active quotas for project "%s" ' + '(among others, possibly). 
' + 'Please resolve all ambiguous quotas before ' + 'reattempting the migration.') + for quota in quotas: + assert quota.project_id not in project_ids, message % quota.project_id + project_ids.add(quota.project_id) + + +def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas): + """Ensure that there are no duplicate non-deleted quota entries.""" + select = quotas.select().where(quotas.c.deleted == False) + results = migrate_engine.execute(select) + _assert_no_duplicate_project_ids(list(results)) + + +def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas): + """Ensure that there are no duplicate non-deleted quota entries.""" + for resource in resources: + select = quotas.select().\ + where(quotas.c.deleted == False).\ + where(quotas.c.resource == resource) + results = migrate_engine.execute(select) + _assert_no_duplicate_project_ids(list(results)) + + +def convert_forward(migrate_engine, old_quotas, new_quotas): + quotas = list(migrate_engine.execute(old_quotas.select())) + for quota in quotas: + for resource in resources: + hard_limit = getattr(quota, resource) + if hard_limit is None: + continue + insert = new_quotas.insert().values( + created_at=quota.created_at, + updated_at=quota.updated_at, + deleted_at=quota.deleted_at, + deleted=quota.deleted, + project_id=quota.project_id, + resource=resource, + hard_limit=hard_limit) + migrate_engine.execute(insert) + + +def earliest(date1, date2): + if date1 is None and date2 is None: + return None + if date1 is None: + return date2 + if date2 is None: + return date1 + if date1 < date2: + return date1 + return date2 + + +def latest(date1, date2): + if date1 is None and date2 is None: + return None + if date1 is None: + return date2 + if date2 is None: + return date1 + if date1 > date2: + return date1 + return date2 + + +def convert_backward(migrate_engine, old_quotas, new_quotas): + quotas = {} + for quota in migrate_engine.execute(new_quotas.select()): + if (quota.resource not in resources + or quota.hard_limit is None or quota.deleted): + continue + if not quota.project_id in quotas: + quotas[quota.project_id] = { + 'project_id': quota.project_id, + 'created_at': quota.created_at, + 'updated_at': quota.updated_at, + quota.resource: quota.hard_limit, + } + else: + quotas[quota.project_id]['created_at'] = earliest( + quota.created_at, quotas[quota.project_id]['created_at']) + quotas[quota.project_id]['updated_at'] = latest( + quota.updated_at, quotas[quota.project_id]['updated_at']) + quotas[quota.project_id][quota.resource] = quota.hard_limit + + for quota in quotas.itervalues(): + insert = old_quotas.insert().values(**quota) + migrate_engine.execute(insert) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + old_quotas = quotas_table(meta) + assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas) + + new_quotas = new_style_quotas_table(meta, 'quotas_new') + new_quotas.create() + convert_forward(migrate_engine, old_quotas, new_quotas) + old_quotas.drop() + + # clear metadata to work around this: + # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128 + meta.clear() + new_quotas = quotas_table(meta, 'quotas_new') + new_quotas.rename('quotas') + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
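+ # convert_backward() folds the per-resource rows back into a single + # wide row per project before the key/value table is dropped.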
+ meta = MetaData() + meta.bind = migrate_engine + + new_quotas = quotas_table(meta) + assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas) + + old_quotas = old_style_quotas_table(meta, 'quotas_old') + old_quotas.create() + convert_backward(migrate_engine, old_quotas, new_quotas) + new_quotas.drop() + + # clear metadata to work around this: + # http://code.google.com/p/sqlalchemy-migrate/issues/detail?id=128 + meta.clear() + old_quotas = quotas_table(meta, 'quotas_old') + old_quotas.rename('quotas') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py b/cinder/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py new file mode 100644 index 00000000000..0aed48a7d19 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py @@ -0,0 +1,87 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + continue + try: + types[instance.id] = int(instance.instance_type_id) + except ValueError: + LOG.warn("Instance %s did not have instance_type_id " + "converted to an integer because its value is %s" % + (instance.id, instance.instance_type_id)) + types[instance.id] = None + + integer_column = Column('instance_type_id_int', Integer(), nullable=True) + string_column = instances.c.instance_type_id + + integer_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_int=instance_type_id) + migrate_engine.execute(update) + + string_column.alter(name='instance_type_id_str') + integer_column.alter(name='instance_type_id') + string_column.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + integer_column = instances.c.instance_type_id + string_column = Column('instance_type_id_str', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + else: + types[instance.id] = str(instance.instance_type_id) + + string_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_str=instance_type_id) + 
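# copy each saved value back into the string column, one UPDATE per row +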
migrate_engine.execute(update) + + integer_column.alter(name='instance_type_id_int') + string_column.alter(name='instance_type_id') + integer_column.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/cinder/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py new file mode 100644 index 00000000000..59ead97ada4 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + tokens = Table('auth_tokens', meta, autoload=True) + c_manageent = tokens.c.server_manageent_url + c_manageent.alter(name='server_management_url') + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + tokens = Table('auth_tokens', meta, autoload=True) + c_management = tokens.c.server_management_url + c_management.alter(name='server_manageent_url') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py b/cinder/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py new file mode 100644 index 00000000000..e0670e3c722 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py @@ -0,0 +1,82 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Table, MetaData +from sqlalchemy import Integer, DateTime, Boolean, String + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # + # New Tables + # + snapshots = Table('snapshots', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('volume_id', Integer(), nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('progress', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_size', Integer()), + Column('scheduled_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + try: + snapshots.create() + except Exception: + LOG.info(repr(snapshots)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=[snapshots]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + snapshots = Table('snapshots', meta, autoload=True) + snapshots.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py b/cinder/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py new file mode 100644 index 00000000000..c5a632ca08a --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py @@ -0,0 +1,40 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Table, MetaData, Integer + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + snapshot_id = Column('snapshot_id', Integer()) + # Add columns to existing tables + volumes.create_column(snapshot_id) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + + volumes.drop_column('snapshot_id') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql new file mode 100644 index 00000000000..97b94660453 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/020_sqlite_downgrade.sql @@ -0,0 +1,119 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE volumes_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + snapshot_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth, + snapshot_id + FROM volumes; + + DROP TABLE volumes; + + CREATE TABLE volumes ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + ec2_id VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(256), + provider_auth VARCHAR(256), + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + provider_auth + FROM volumes_backup; + + DROP TABLE volumes_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py b/cinder/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py new file mode 100644 index 00000000000..64b539ed65c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + + image_id_column = instances.c.image_id + image_id_column.alter(name='image_ref') + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + image_ref_column = instances.c.image_ref + image_ref_column.alter(name='image_id') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py b/cinder/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py new file mode 100644 index 00000000000..2c10b790a68 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py @@ -0,0 +1,64 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + if migrate_engine.name == "mysql": + migrate_engine.execute("ALTER TABLE auth_tokens Engine=InnoDB") + migrate_engine.execute("ALTER TABLE certificates Engine=InnoDB") + migrate_engine.execute("ALTER TABLE compute_nodes Engine=InnoDB") + migrate_engine.execute("ALTER TABLE console_pools Engine=InnoDB") + migrate_engine.execute("ALTER TABLE consoles Engine=InnoDB") + migrate_engine.execute("ALTER TABLE export_devices Engine=InnoDB") + migrate_engine.execute("ALTER TABLE fixed_ips Engine=InnoDB") + migrate_engine.execute("ALTER TABLE floating_ips Engine=InnoDB") + migrate_engine.execute("ALTER TABLE instance_actions Engine=InnoDB") + migrate_engine.execute("ALTER TABLE instance_metadata Engine=InnoDB") + migrate_engine.execute("ALTER TABLE instance_types Engine=InnoDB") + migrate_engine.execute("ALTER TABLE instances Engine=InnoDB") + migrate_engine.execute("ALTER TABLE iscsi_targets Engine=InnoDB") + migrate_engine.execute("ALTER TABLE key_pairs Engine=InnoDB") + migrate_engine.execute("ALTER TABLE migrate_version Engine=InnoDB") + migrate_engine.execute("ALTER TABLE migrations Engine=InnoDB") + migrate_engine.execute("ALTER TABLE networks Engine=InnoDB") + migrate_engine.execute("ALTER TABLE projects Engine=InnoDB") + migrate_engine.execute("ALTER TABLE quotas Engine=InnoDB") + migrate_engine.execute( + "ALTER TABLE security_group_instance_association Engine=InnoDB") + migrate_engine.execute( + "ALTER TABLE security_group_rules Engine=InnoDB") + migrate_engine.execute("ALTER TABLE security_groups Engine=InnoDB") + migrate_engine.execute("ALTER TABLE services Engine=InnoDB") + migrate_engine.execute( + "ALTER TABLE user_project_association Engine=InnoDB") + migrate_engine.execute( + "ALTER TABLE user_project_role_association Engine=InnoDB") + migrate_engine.execute( + "ALTER TABLE user_role_association Engine=InnoDB") + migrate_engine.execute("ALTER TABLE users Engine=InnoDB") + migrate_engine.execute("ALTER TABLE volumes Engine=InnoDB") + migrate_engine.execute("ALTER TABLE zones Engine=InnoDB") + migrate_engine.execute("ALTER TABLE snapshots Engine=InnoDB") + + +def downgrade(migrate_engine): + pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py new file mode 100644 index 00000000000..ee607dd9240 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, MetaData, String, Table + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances_vm_mode = Column('vm_mode', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + instances.create_column(instances_vm_mode) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('vm_mode') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py b/cinder/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py new file mode 100644 index 00000000000..5c6ddb97089 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py @@ -0,0 +1,92 @@ +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column +from sqlalchemy import DateTime, Boolean, Integer, String +from sqlalchemy import ForeignKey +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + + # + # New Tables + # + block_device_mapping = Table('block_device_mapping', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, autoincrement=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('device_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('delete_on_termination', + Boolean(create_constraint=True, name=None), + default=False), + Column('virtual_name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True), + Column('snapshot_id', + Integer(), + ForeignKey('snapshots.id'), + nullable=True), + Column('volume_id', Integer(), ForeignKey('volumes.id'), + nullable=True), + Column('volume_size', Integer(), nullable=True), + Column('no_device', + Boolean(create_constraint=True, name=None), + nullable=True), + ) + try: + block_device_mapping.create() + except Exception: + LOG.info(repr(block_device_mapping)) + LOG.exception('Exception while creating table') + meta.drop_all(tables=[block_device_mapping]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instances = Table('instances', meta, autoload=True) + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + block_device_mapping.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py new file mode 100644 index 00000000000..313cb16de2c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
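The migration that follows (025) adds a nullable uuid column to instances and then backfills every existing row one UPDATE at a time. A minimal, self-contained sketch of that add-column-then-backfill pattern, illustrative only and not part of the patch: the 'things' table and its columns are hypothetical, and Table.create_column() assumes sqlalchemy-migrate's changeset extensions are loaded, as they are for every script in this repository.

    import uuid

    import migrate  # noqa -- assumed to monkeypatch Table.create_column()
    from sqlalchemy import Column, MetaData, String, Table


    def upgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine
        things = Table('things', meta, autoload=True)  # hypothetical table

        # Add the column as nullable so existing rows remain valid.
        things.create_column(Column('uuid', String(36)))

        # Backfill each pre-existing row with a freshly generated value.
        for row in migrate_engine.execute(things.select()):
            migrate_engine.execute(
                things.update()
                .where(things.c.id == row['id'])
                .values(uuid=str(uuid.uuid4())))

Adding the column nullable first is what keeps the operation safe on a populated table; a NOT NULL column could not be added without a server-side default.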
+ +from sqlalchemy import Column, Integer, MetaData, String, Table + +from cinder import utils + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + uuid_column = Column("uuid", String(36)) + instances.create_column(uuid_column) + + rows = migrate_engine.execute(instances.select()) + for row in rows: + instance_uuid = str(utils.gen_uuid()) + migrate_engine.execute(instances.update()\ + .where(instances.c.id == row[0])\ + .values(uuid=instance_uuid)) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('uuid') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py new file mode 100644 index 00000000000..d8f038b0d49 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py @@ -0,0 +1,89 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + # + # New Tables + # + builds = Table('agent_builds', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('hypervisor', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('os', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('architecture', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('version', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('url', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('md5hash', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + for table in (builds, ): + try: + table.create() + except Exception: + LOG.info(repr(table)) + + instances = Table('instances', meta, autoload=True) + + # + # New Columns + # + architecture = Column('architecture', String(length=255)) + + # Add columns to existing tables + instances.create_column(architecture) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + builds = Table('agent_builds', meta, autoload=True) + for table in (builds, ): + table.drop() + + instances = Table('instances', meta, autoload=True) + instances.drop_column('architecture') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py b/cinder/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py new file mode 100644 index 00000000000..8b653444c1c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py @@ -0,0 +1,65 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + # + # New Tables + # + provider_fw_rules = Table('provider_fw_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('protocol', + String(length=5, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + for table in (provider_fw_rules,): + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + provider_fw_rules = Table('provider_fw_rules', meta, autoload=True) + for table in (provider_fw_rules,): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py b/cinder/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py new file mode 100644 index 00000000000..b8346b73541 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py @@ -0,0 +1,76 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 University of Southern California +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instance_types = Table('instance_types', meta, autoload=True) + + # + # New Tables + # + instance_type_extra_specs_table = Table('instance_type_extra_specs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_type_id', + Integer(), + ForeignKey('instance_types.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + for table in (instance_type_extra_specs_table, ): + try: + table.create() + except Exception: + LOG.info(repr(table)) + LOG.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + instance_types = Table('instance_types', meta, autoload=True) + + instance_type_extra_specs_table = Table('instance_type_extra_specs', + meta, + autoload=True) + for table in (instance_type_extra_specs_table, ): + table.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py b/cinder/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py new file mode 100644 index 00000000000..80eb836c06b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py @@ -0,0 +1,41 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Float, Integer, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + # + # New Columns + # + weight_offset = Column('weight_offset', Float(), default=0.0) + weight_scale = Column('weight_scale', Float(), default=1.0) + + zones.create_column(weight_offset) + zones.create_column(weight_scale) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + zones.drop_column('weight_offset') + zones.drop_column('weight_scale') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py b/cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py new file mode 100644 index 00000000000..a34baa83d4c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py @@ -0,0 +1,146 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import select, Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table + +from cinder import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # grab tables and (column for dropping later) + instances = Table('instances', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + c = instances.columns['mac_address'] + + interface = Column('bridge_interface', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)) + + virtual_interface_id = Column('virtual_interface_id', + Integer()) + # add interface column to networks table + # values will have to be set manually before running cinder + try: + networks.create_column(interface) + except Exception: + LOG.error(_("interface column not added to networks table")) + raise + + # + # New Tables + # + virtual_interfaces = Table('virtual_interfaces', meta, + Column('created_at', DateTime(timezone=False), + default=utils.utcnow()), + Column('updated_at', DateTime(timezone=False), + onupdate=utils.utcnow()), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('network_id', + Integer(), + ForeignKey('networks.id')), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + mysql_engine='InnoDB') + + # create virtual_interfaces table + try: + virtual_interfaces.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(virtual_interfaces)) + raise + + # add virtual_interface_id column to fixed_ips table + try: + fixed_ips.create_column(virtual_interface_id) + except Exception: + LOG.error(_("VIF column not added to fixed_ips table")) + raise + + # populate the virtual_interfaces table + # extract data from existing instance and fixed_ip tables + s = select([instances.c.id, instances.c.mac_address, + fixed_ips.c.network_id], + fixed_ips.c.instance_id == instances.c.id) + keys = ('instance_id', 'address', 'network_id') + join_list = [dict(zip(keys, row)) for row in s.execute()] + LOG.debug(_("join list for moving mac_addresses |%s|"), join_list) + + # insert data into the table + if join_list: + i = virtual_interfaces.insert() + i.execute(join_list) + + # populate the fixed_ips virtual_interface_id column + s = select([fixed_ips.c.id, fixed_ips.c.instance_id], + fixed_ips.c.instance_id != None) + + for row in s.execute(): + m = select([virtual_interfaces.c.id]).\ + where(virtual_interfaces.c.instance_id == row['instance_id']).\ + as_scalar() + u = fixed_ips.update().values(virtual_interface_id=m).\ + where(fixed_ips.c.id == row['id']) + u.execute() + + # drop the mac_address column from instances + 
c.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # grab tables and (column for dropping later) + instances = Table('instances', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + mac_address = Column('mac_address', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + instances.create_column(mac_address) + + s = select([instances.c.id, virtual_interfaces.c.address], + virtual_interfaces.c.instance_id == instances.c.id) + + for row in s.execute(): + u = instances.update().values(mac_address=row['address']).\ + where(instances.c.id == row['id']) + u.execute() + + networks.drop_column('bridge_interface') + virtual_interfaces.drop() + fixed_ips.drop_column('virtual_interface_id') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql new file mode 100644 index 00000000000..2486e6d2db6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql @@ -0,0 +1,377 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + admin_pass VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + state INTEGER, + state_description VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + PRIMARY KEY (id), + CHECK (locked IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + admin_pass VARCHAR(255), + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + state INTEGER, + state_description VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), +
scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + mac_address VARCHAR(255), + PRIMARY KEY (id), + CHECK (locked IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + admin_pass, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + state, + state_description, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + NULL AS mac_address + FROM instances_backup; + + DROP TABLE instances_backup; + + UPDATE instances SET mac_address=(SELECT address + FROM virtual_interfaces + WHERE virtual_interfaces.instance_id = instances.id); + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + PRIMARY KEY (id), + CHECK (injected IN (0, 1)), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6 + FROM networks_backup; + + DROP TABLE networks_backup; + + DROP TABLE virtual_interfaces; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, 
+ address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + virtual_interface_id INTEGER, + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved, + virtual_interface_id + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN, + leased BOOLEAN, + reserved BOOLEAN, + PRIMARY KEY (id), + CHECK (reserved IN (0, 1)), + CHECK (allocated IN (0, 1)), + CHECK (leased IN (0, 1)), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(instance_id) REFERENCES instances (id), + FOREIGN KEY(network_id) REFERENCES networks (id) + ); + + INSERT INTO fixed_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + allocated, + leased, + reserved + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py b/cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py new file mode 100644 index 00000000000..4c1413b22ee --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py @@ -0,0 +1,59 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
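Next is migration 031, which adds the fixed_ips to virtual_interfaces foreign key via ALTER TABLE on engines that support it; SQLite cannot, so the paired 031_sqlite_*.sql scripts rebuild the table instead and the Python module guards on the dialect. A rough sketch of that guard, illustrative only, with hypothetical parent/child tables:

    from migrate import ForeignKeyConstraint
    from sqlalchemy import MetaData, Table


    def upgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine
        child = Table('child', meta, autoload=True)    # hypothetical
        parent = Table('parent', meta, autoload=True)  # hypothetical

        # SQLite has no ALTER TABLE ... ADD CONSTRAINT; the migrate
        # runner executes NNN_sqlite_upgrade.sql there instead, so the
        # dialect check keeps this module a no-op on that engine.
        if not migrate_engine.url.get_dialect().name.startswith('sqlite'):
            ForeignKeyConstraint(columns=[child.c.parent_id],
                                 refcolumns=[parent.c.id]).create()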
+ +from sqlalchemy import MetaData, Table +from migrate import ForeignKeyConstraint + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + + # grab tables + fixed_ips = Table('fixed_ips', meta, autoload=True) + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + # add foreignkey if not sqlite + try: + if not dialect.startswith('sqlite'): + ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id], + refcolumns=[virtual_interfaces.c.id]).create() + except Exception: + LOG.error(_("foreign key constraint couldn't be added")) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + + # grab tables + fixed_ips = Table('fixed_ips', meta, autoload=True) + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + # drop foreignkey if not sqlite + try: + if not dialect.startswith('sqlite'): + ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id], + refcolumns=[virtual_interfaces.c.id]).drop() + except Exception: + LOG.error(_("foreign key constraint couldn't be dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql new file mode 100644 index 00000000000..c1d26b18031 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql @@ -0,0 +1,48 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips_backup + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id) + ); + + INSERT INTO fixed_ips + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql new file mode 100644 index 00000000000..2a9362545f1 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql @@ -0,0 +1,48 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at
DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id) + ); + + INSERT INTO fixed_ips_backup + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py b/cinder/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py new file mode 100644 index 00000000000..f12070c5709 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/032_add_root_device_name.py @@ -0,0 +1,42 @@ +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table, String + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + root_device_name = Column( + 'root_device_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + instances.create_column(root_device_name) + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('root_device_name') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/033_ha_network.py b/cinder/db/sqlalchemy/migrate_repo/versions/033_ha_network.py new file mode 100644 index 00000000000..becc353f68f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/033_ha_network.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Table, MetaData, Boolean, String + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + fixed_ips_host = Column('host', String(255)) + fixed_ips = Table('fixed_ips', meta, autoload=True) + fixed_ips.create_column(fixed_ips_host) + + networks_multi_host = Column('multi_host', Boolean, default=False) + networks = Table('networks', meta, autoload=True) + networks.create_column(networks_multi_host) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + fixed_ips = Table('fixed_ips', meta, autoload=True) + fixed_ips.drop_column('host') + + networks = Table('networks', meta, autoload=True) + networks.drop_column('multi_host') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql new file mode 100644 index 00000000000..34188d86629 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/033_sqlite_downgrade.sql @@ -0,0 +1,193 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + host VARCHAR(255), + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips_backup + SELECT id, + address, + virtual_interface_id, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted, + host + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips + SELECT id, + address, + virtual_interface_id, + network_id, + instance_id, + allocated, + leased, + reserved, + created_at, + updated_at, + deleted_at, + deleted + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + + CREATE TEMPORARY TABLE networks_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + multi_host BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)), + CHECK (multi_host IN (0, 1)) + ); + + INSERT INTO networks_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + 
vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface, + multi_host + FROM networks; + + DROP TABLE networks; + + CREATE TABLE networks( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + injected BOOLEAN, + cidr VARCHAR(255), + netmask VARCHAR(255), + bridge VARCHAR(255), + gateway VARCHAR(255), + broadcast VARCHAR(255), + dns VARCHAR(255), + vlan INTEGER, + vpn_public_address VARCHAR(255), + vpn_public_port INTEGER, + vpn_private_address VARCHAR(255), + dhcp_start VARCHAR(255), + project_id VARCHAR(255), + host VARCHAR(255), + cidr_v6 VARCHAR(255), + gateway_v6 VARCHAR(255), + label VARCHAR(255), + netmask_v6 VARCHAR(255), + bridge_interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (injected IN (0, 1)) + ); + + INSERT INTO networks + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + injected, + cidr, + netmask, + bridge, + gateway, + broadcast, + dns, + vlan, + vpn_public_address, + vpn_public_port, + vpn_private_address, + dhcp_start, + project_id, + host, + cidr_v6, + gateway_v6, + label, + netmask_v6, + bridge_interface + FROM networks_backup; + + DROP TABLE networks_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py b/cinder/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py new file mode 100644 index 00000000000..9cf004301b7 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/034_change_instance_id_in_migrations.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, String, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + migrations = Table('migrations', meta, autoload=True) + instance_uuid = Column('instance_uuid', String(255)) + migrations.create_column(instance_uuid) + + if migrate_engine.name == "mysql": + try: + migrate_engine.execute("ALTER TABLE migrations DROP FOREIGN KEY " + "`migrations_ibfk_1`;") + except Exception: # Don't care, just fail silently. + pass + + migrations.c.instance_id.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + migrations = Table('migrations', meta, autoload=True) + migrations.c.instance_uuid.drop() + instance_id = Column('instance_id', Integer()) + migrations.create_column(instance_id) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py b/cinder/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py new file mode 100644 index 00000000000..c03e5be6625 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Table, MetaData, String + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks.c.dns.alter(name='dns1') + dns2 = Column('dns2', String(255)) + networks.create_column(dns2) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + networks = Table('networks', meta, autoload=True) + + networks.c.dns1.alter(name='dns') + networks.drop_column('dns2') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py b/cinder/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py new file mode 100644 index 00000000000..38f83fc0192 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py @@ -0,0 +1,79 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
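Migration 036 below swaps the migrations table from flavor ids to instance-type ids by first reading an id-to-flavorid map out of instance_types and then rewriting rows through it. The kernel of that translate-via-lookup step, condensed into a sketch; the records table and its old_code/new_id columns are hypothetical:

    from sqlalchemy import MetaData, Table


    def translate(migrate_engine, lookup):
        # Rewrite each row so that new_id holds lookup's value for the
        # row's old_code, issuing one UPDATE per distinct code.
        meta = MetaData()
        meta.bind = migrate_engine
        records = Table('records', meta, autoload=True)
        for new_id, old_code in lookup.items():
            migrate_engine.execute(
                records.update()
                .where(records.c.old_code == old_code)
                .values(new_id=new_id))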
+ +from sqlalchemy import Column, Integer, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + migrations = Table('migrations', meta, autoload=True) + + old_instance_type_id = Column('old_instance_type_id', Integer()) + new_instance_type_id = Column('new_instance_type_id', Integer()) + migrations.create_column(old_instance_type_id) + migrations.create_column(new_instance_type_id) + + # Convert flavor_id to instance_type_id + itypes = {} + for instance_type in migrate_engine.execute(instance_types.select()): + itypes[instance_type.id] = instance_type.flavorid + + for instance_type_id in itypes.keys(): + migrate_engine.execute(migrations.update()\ + .where(migrations.c.old_flavor_id == itypes[instance_type_id])\ + .values(old_instance_type_id=instance_type_id)) + migrate_engine.execute(migrations.update()\ + .where(migrations.c.new_flavor_id == itypes[instance_type_id])\ + .values(new_instance_type_id=instance_type_id)) + + migrations.c.old_flavor_id.drop() + migrations.c.new_flavor_id.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instance_types = Table('instance_types', meta, autoload=True) + migrations = Table('migrations', meta, autoload=True) + + old_flavor_id = Column('old_flavor_id', Integer()) + new_flavor_id = Column('new_flavor_id', Integer()) + + migrations.create_column(old_flavor_id) + migrations.create_column(new_flavor_id) + + # Convert instance_type_id to flavor_id + itypes = {} + for instance_type in migrate_engine.execute(instance_types.select()): + itypes[instance_type.flavorid] = instance_type.id + + for instance_type_flavorid in itypes.keys(): + migrate_engine.execute(migrations.update()\ + .where(migrations.c.old_instance_type_id == + itypes[instance_type_flavorid])\ + .values(old_flavor_id=instance_type_flavorid)) + migrate_engine.execute(migrations.update()\ + .where(migrations.c.new_instance_type_id == + itypes[instance_type_flavorid])\ + .values(new_flavor_id=instance_type_flavorid)) + + migrations.c.old_instance_type_id.drop() + migrations.c.new_instance_type_id.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py b/cinder/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py new file mode 100644 index 00000000000..c8a1a19274e --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/037_instances_drop_admin_pass.py @@ -0,0 +1,42 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
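Migration 037 below simply drops instances.admin_pass; its downgrade can recreate the column but not the values, so the reversal is structural only. When data has to survive the round trip, an in-place rename (the technique migrations 021 and 035 use) is the reversible option, roughly as follows; the retired column name is hypothetical:

    from sqlalchemy import MetaData, Table


    def upgrade(migrate_engine):
        meta = MetaData()
        meta.bind = migrate_engine
        instances = Table('instances', meta, autoload=True)
        # alter(name=...) is sqlalchemy-migrate's in-place rename; the
        # data stays in the column, so downgrade() can rename it back.
        instances.c.admin_pass.alter(name='admin_pass_retired')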
+ +from sqlalchemy import Column, MetaData, Table, String + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('admin_pass') + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + # + # New Columns + # + admin_pass = Column( + 'admin_pass', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + + instances.create_column(admin_pass) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py b/cinder/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py new file mode 100644 index 00000000000..fbd1c45702c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/038_add_uuid_to_virtual_interfaces.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2011 Midokura KK +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + +from cinder import utils + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + uuid_column = Column('uuid', String(36)) + virtual_interfaces.create_column(uuid_column) + + rows = migrate_engine.execute(virtual_interfaces.select()) + for row in rows: + vif_uuid = str(utils.gen_uuid()) + migrate_engine.execute(virtual_interfaces.update()\ + .where(virtual_interfaces.c.id == row[0])\ + .values(uuid=vif_uuid)) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + virtual_interfaces.drop_column('uuid') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql new file mode 100644 index 00000000000..0ac66e7e01b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/038_sqlite_downgrade.sql @@ -0,0 +1,63 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE virtual_interfaces_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id, + uuid + FROM virtual_interfaces; + + DROP TABLE virtual_interfaces; + + CREATE TABLE virtual_interfaces ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + 
network_id INTEGER, + instance_id INTEGER NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + network_id, + instance_id + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py b/cinder/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py new file mode 100644 index 00000000000..8c8961cd33a --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/039_add_instances_accessip.py @@ -0,0 +1,49 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table, String + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + accessIPv4 = Column( + 'access_ip_v4', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + + accessIPv6 = Column( + 'access_ip_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + instances.create_column(accessIPv4) + instances.create_column(accessIPv6) + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True) + + instances.drop_column('access_ip_v4') + instances.drop_column('access_ip_v6') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py b/cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py new file mode 100644 index 00000000000..7125911d34d --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
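Migration 040 below repeats, for networks, the same add-uuid-and-backfill sequence that 025 ran for instances and 038 for virtual_interfaces. Were the pattern factored out, a shared helper might look roughly like this; no such helper exists in the tree, and the sketch assumes the target table has an integer id primary key:

    from sqlalchemy import Column, MetaData, String, Table

    from cinder import utils


    def add_uuid_column(migrate_engine, table_name):
        meta = MetaData()
        meta.bind = migrate_engine
        table = Table(table_name, meta, autoload=True)
        table.create_column(Column('uuid', String(36)))
        # Backfill existing rows, mirroring migrations 025, 038 and 040.
        for row in migrate_engine.execute(table.select()):
            migrate_engine.execute(
                table.update()
                .where(table.c.id == row['id'])
                .values(uuid=str(utils.gen_uuid())))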
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py b/cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py
new file mode 100644
index 00000000000..7125911d34d
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/040_add_uuid_to_networks.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+from cinder import utils
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    networks = Table('networks', meta, autoload=True)
+
+    uuid_column = Column("uuid", String(36))
+    networks.create_column(uuid_column)
+
+    rows = migrate_engine.execute(networks.select())
+    for row in rows:
+        networks_uuid = str(utils.gen_uuid())
+        migrate_engine.execute(networks.update()\
+                .where(networks.c.id == row[0])\
+                .values(uuid=networks_uuid))
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    networks = Table('networks', meta, autoload=True)
+
+    networks.drop_column('uuid')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py
new file mode 100644
index 00000000000..f85c4a0d743
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/041_add_config_drive_to_instances.py
@@ -0,0 +1,36 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2011 Piston Cloud Computing, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table("instances", meta, autoload=True)
+
+    config_drive_column = Column("config_drive", String(255), nullable=True)
+    instances.create_column(config_drive_column)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table("instances", meta, autoload=True)
+
+    instances.drop_column('config_drive')
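[Editorial note: migration 042, next, introduces foreign-key-linked tables; its downgrade drops volume_type_extra_specs before volume_types because a child table referencing a parent must go first. A hedged two-table sketch of that ordering constraint:]

    # Sketch: with a FOREIGN KEY from child to parent, drop the child first --
    # the reason 042's downgrade comment says "table order matters".
    from sqlalchemy import (Column, ForeignKey, Integer, MetaData, Table,
                            create_engine)

    meta = MetaData(bind=create_engine('sqlite:///:memory:'))
    parent = Table('parent', meta, Column('id', Integer, primary_key=True))
    child = Table('child', meta,
                  Column('id', Integer, primary_key=True),
                  Column('parent_id', Integer, ForeignKey('parent.id')))
    meta.create_all()

    for table in (child, parent):  # child first, or the parent drop can fail
        table.drop()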
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py b/cinder/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py
new file mode 100644
index 00000000000..2434bb0abf9
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/042_add_volume_types_and_extradata.py
@@ -0,0 +1,122 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
+from sqlalchemy import Boolean, ForeignKey
+
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volumes = Table('volumes', meta, autoload=True)
+
+    #
+    # New Tables
+    #
+    volume_types = Table('volume_types', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('name',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               unique=True))
+
+    volume_type_extra_specs_table = Table('volume_type_extra_specs', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('volume_type_id',
+               Integer(),
+               ForeignKey('volume_types.id'),
+               nullable=False),
+        Column('key',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('value',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)))
+
+    volume_metadata_table = Table('volume_metadata', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('volume_id',
+               Integer(),
+               ForeignKey('volumes.id'),
+               nullable=False),
+        Column('key',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('value',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)))
+
+    new_tables = (volume_types,
+                  volume_type_extra_specs_table,
+                  volume_metadata_table)
+
+    for table in new_tables:
+        try:
+            table.create()
+        except Exception:
+            LOG.info(repr(table))
+            LOG.exception('Exception while creating table')
+            raise
+
+    #
+    # New Columns
+    #
+    volume_type_id = Column('volume_type_id', Integer(), nullable=True)
+    volumes.create_column(volume_type_id)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    volumes = Table('volumes', meta, autoload=True)
+
+    volumes.drop_column('volume_type_id')
+
+    volume_types = Table('volume_types', meta, autoload=True)
+    volume_type_extra_specs_table = Table('volume_type_extra_specs',
+                                          meta,
+                                          autoload=True)
+    volume_metadata_table = Table('volume_metadata', meta, autoload=True)
+
+    # table order matters, don't change
+    for table in (volume_type_extra_specs_table,
+                  volume_types,
+                  volume_metadata_table):
+        table.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql
new file mode 100644
index 00000000000..8fa39663a23
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/042_sqlite_downgrade.sql
@@ -0,0 +1,129 @@
+BEGIN TRANSACTION;
+
+    CREATE TEMPORARY TABLE volumes_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        ec2_id VARCHAR(255),
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        host VARCHAR(255),
+        size INTEGER,
+        availability_zone VARCHAR(255),
+        instance_id INTEGER,
+        mountpoint VARCHAR(255),
+        attach_time VARCHAR(255),
+        status VARCHAR(255),
+        attach_status VARCHAR(255),
+        scheduled_at DATETIME,
+        launched_at DATETIME,
+        terminated_at DATETIME,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        provider_location VARCHAR(256),
+        provider_auth VARCHAR(256),
+        snapshot_id INTEGER,
+        volume_type_id INTEGER,
+        PRIMARY KEY (id),
+        FOREIGN KEY(instance_id) REFERENCES instances (id),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO volumes_backup
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               ec2_id,
+               user_id,
+               project_id,
+               host,
+               size,
+               availability_zone,
+               instance_id,
+               mountpoint,
+               attach_time,
+               status,
+               attach_status,
+               scheduled_at,
+               launched_at,
+               terminated_at,
+               display_name,
+               display_description,
+               provider_location,
+               provider_auth,
+               snapshot_id,
+               volume_type_id
+        FROM volumes;
+
+    DROP TABLE volumes;
+
+    CREATE TABLE volumes (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        ec2_id VARCHAR(255),
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        host VARCHAR(255),
+        size INTEGER,
+        availability_zone VARCHAR(255),
+        instance_id INTEGER,
+        mountpoint VARCHAR(255),
+        attach_time VARCHAR(255),
+        status VARCHAR(255),
+        attach_status VARCHAR(255),
+        scheduled_at DATETIME,
+        launched_at DATETIME,
+        terminated_at DATETIME,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        provider_location VARCHAR(256),
+        provider_auth VARCHAR(256),
+        snapshot_id INTEGER,
+        PRIMARY KEY (id),
+        FOREIGN KEY(instance_id) REFERENCES instances (id),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO volumes
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               ec2_id,
+               user_id,
+               project_id,
+               host,
+               size,
+               availability_zone,
+               instance_id,
+               mountpoint,
+               attach_time,
+               status,
+               attach_status,
+               scheduled_at,
+               launched_at,
+               terminated_at,
+               display_name,
+               display_description,
+               provider_location,
+               provider_auth,
+               snapshot_id
+        FROM volumes_backup;
+
+    DROP TABLE volumes_backup;
+
+    DROP TABLE volume_type_extra_specs;
+
+    DROP TABLE volume_types;
+
+    DROP TABLE volume_metadata;
+
+COMMIT;
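[Editorial note: the paired 042_sqlite_downgrade.sql exists because SQLite's ALTER TABLE cannot drop a column, so the downgrade copies data to a temporary table, recreates the table without volume_type_id, and copies back. sqlalchemy-migrate picks up such a script by filename convention; a sketch of that convention, inferred from the filenames in this patch:]

    # Sketch of the dialect-override naming these migrations rely on.
    def override_script(version, dialect, operation):
        """E.g. (42, 'sqlite', 'downgrade') -> '042_sqlite_downgrade.sql'."""
        return '%03d_%s_%s.sql' % (version, dialect, operation)

    print(override_script(42, 'sqlite', 'downgrade'))  # 042_sqlite_downgrade.sql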
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py b/cinder/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py
new file mode 100644
index 00000000000..1e22608fc0b
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py
@@ -0,0 +1,84 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
+from sqlalchemy import Boolean
+
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    #
+    # New Tables
+    #
+    virtual_storage_arrays = Table('virtual_storage_arrays', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('display_name',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('display_description',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('project_id',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('availability_zone',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('instance_type_id', Integer(), nullable=False),
+        Column('image_ref',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('vc_count', Integer(), nullable=False),
+        Column('vol_count', Integer(), nullable=False),
+        Column('status',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+    try:
+        virtual_storage_arrays.create()
+    except Exception:
+        LOG.info(repr(virtual_storage_arrays))
+        LOG.exception('Exception while creating table')
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    virtual_storage_arrays = Table('virtual_storage_arrays',
+                                   meta,
+                                   autoload=True)
+    virtual_storage_arrays.drop()
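[Editorial note: migration 044, next, renames columns in place (state to power_state, state_description to vm_state) via sqlalchemy-migrate's Column.alter. An isolated sketch with a hypothetical table; the migrate.changeset import is assumed to be what patches .alter onto columns, something the migrate runner normally arranges:]

    # Sketch of the rename-in-place used by 044 and 058.
    import migrate.changeset  # noqa -- assumed to patch Column.alter()
    from sqlalchemy import MetaData, Table, create_engine

    meta = MetaData(bind=create_engine('sqlite:///example.db'))
    things = Table('things', meta, autoload=True)  # hypothetical table

    things.c.state.alter(name='power_state')  # rename, keeping type and data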
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py b/cinder/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py
new file mode 100644
index 00000000000..95d3b7529ce
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py
@@ -0,0 +1,52 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table, Column, String
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instance_table = Table('instances', meta, autoload=True)
+
+    c_state = instance_table.c.state
+    c_state.alter(name='power_state')
+
+    c_vm_state = instance_table.c.state_description
+    c_vm_state.alter(name='vm_state')
+
+    c_task_state = Column('task_state',
+                          String(length=255, convert_unicode=False,
+                                 assert_unicode=None, unicode_error=None,
+                                 _warn_on_bytestring=False),
+                          nullable=True)
+    instance_table.create_column(c_task_state)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instance_table = Table('instances', meta, autoload=True)
+
+    c_state = instance_table.c.power_state
+    c_state.alter(name='state')
+
+    c_vm_state = instance_table.c.vm_state
+    c_vm_state.alter(name='state_description')
+
+    instance_table.drop_column('task_state')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py b/cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py
new file mode 100644
index 00000000000..3d75803dc53
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py
@@ -0,0 +1,44 @@
+# Copyright 2011 Nicira, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, Table
+
+from cinder import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    networks = Table('networks', meta, autoload=True)
+
+    priority = Column('priority', Integer())
+    try:
+        networks.create_column(priority)
+    except Exception:
+        LOG.error(_("priority column not added to networks table"))
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    networks = Table('networks', meta, autoload=True)
+
+    networks.drop_column('priority')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py b/cinder/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py
new file mode 100644
index 00000000000..3ee1c4e7ee5
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/046_add_instance_swap.py
@@ -0,0 +1,49 @@
+# Copyright 2011 Isaku Yamahata
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, Table, String
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    default_local_device = Column(
+        'default_local_device',
+        String(length=255, convert_unicode=False, assert_unicode=None,
+               unicode_error=None, _warn_on_bytestring=False),
+        nullable=True)
+
+    default_swap_device = Column(
+        'default_swap_device',
+        String(length=255, convert_unicode=False, assert_unicode=None,
+               unicode_error=None, _warn_on_bytestring=False),
+        nullable=True)
+    instances.create_column(default_local_device)
+    instances.create_column(default_swap_device)
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    instances.drop_column('default_swap_device')
+    instances.drop_column('default_local_device')
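[Editorial note: migrations 047 and 060, which follow, cannot express foreign-key drops on SQLite, so they return early on that dialect and rely on the paired NNN_sqlite_{upgrade,downgrade}.sql table-rebuild scripts. A small runnable sketch of that dialect guard:]

    # Sketch of the dialect guard used by 047 and 060.
    from sqlalchemy import create_engine

    engine = create_engine('sqlite:///:memory:')
    dialect = engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Constraint DDL is unsupported here; the *_sqlite_*.sql scripts
        # rebuild the table instead.
        print('skipping constraint DDL on %s' % dialect)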
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py b/cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py
new file mode 100644
index 00000000000..0b365df3122
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py
@@ -0,0 +1,61 @@
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table
+from migrate import ForeignKeyConstraint
+
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+    dialect = migrate_engine.url.get_dialect().name
+    if dialect.startswith('sqlite'):
+        return
+
+    instances = Table('instances', meta, autoload=True)
+    vifs = Table('virtual_interfaces', meta, autoload=True)
+
+    try:
+        fkey_name = list(vifs.c.instance_id.foreign_keys)[0].constraint.name
+        ForeignKeyConstraint(columns=[vifs.c.instance_id],
+                             refcolumns=[instances.c.id],
+                             name=fkey_name).drop()
+    except Exception:
+        LOG.error(_("foreign key constraint couldn't be removed"))
+        raise
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    meta = MetaData()
+    meta.bind = migrate_engine
+    dialect = migrate_engine.url.get_dialect().name
+    if dialect.startswith('sqlite'):
+        return
+
+    instances = Table('instances', meta, autoload=True)
+    vifs = Table('virtual_interfaces', meta, autoload=True)
+
+    try:
+        ForeignKeyConstraint(columns=[vifs.c.instance_id],
+                             refcolumns=[instances.c.id]).create()
+    except Exception:
+        LOG.error(_("foreign key constraint couldn't be added"))
+        raise
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql
new file mode 100644
index 00000000000..9bc3ee8d4c0
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql
@@ -0,0 +1,46 @@
+BEGIN TRANSACTION;
+    CREATE TEMPORARY TABLE virtual_interfaces_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        address VARCHAR(255),
+        network_id INTEGER,
+        instance_id INTEGER NOT NULL,
+        uuid VARCHAR(36),
+        PRIMARY KEY (id)
+    );
+
+    INSERT INTO virtual_interfaces_backup
+        SELECT created_at, updated_at, deleted_at, deleted, id, address,
+               network_id, instance_id, uuid
+        FROM virtual_interfaces;
+
+    DROP TABLE virtual_interfaces;
+
+    CREATE TABLE virtual_interfaces (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        address VARCHAR(255),
+        network_id INTEGER,
+        instance_id INTEGER NOT NULL,
+        uuid VARCHAR(36),
+        PRIMARY KEY (id),
+        FOREIGN KEY(network_id) REFERENCES networks (id),
+        FOREIGN KEY(instance_id) REFERENCES instances (id),
+        UNIQUE (address),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO virtual_interfaces
+        SELECT created_at, updated_at, deleted_at, deleted, id, address,
+               network_id, instance_id, uuid
+        FROM virtual_interfaces_backup;
+
+    DROP TABLE virtual_interfaces_backup;
+
+COMMIT;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql
new file mode 100644
index 00000000000..2c0919f1dd9
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql
@@ -0,0 +1,45 @@
+BEGIN TRANSACTION;
+    CREATE TEMPORARY TABLE virtual_interfaces_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        address VARCHAR(255),
+        network_id INTEGER,
+        instance_id INTEGER NOT NULL,
+        uuid VARCHAR(36),
+        PRIMARY KEY (id)
+    );
+
+    INSERT INTO virtual_interfaces_backup
+        SELECT created_at, updated_at, deleted_at, deleted, id, address,
+               network_id, instance_id, uuid
+        FROM virtual_interfaces;
+
+    DROP TABLE virtual_interfaces;
+
+    CREATE TABLE virtual_interfaces (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        address VARCHAR(255),
+        network_id INTEGER,
+        instance_id INTEGER NOT NULL,
+        uuid VARCHAR(36),
+        PRIMARY KEY (id),
+        FOREIGN KEY(network_id) REFERENCES networks (id),
+        UNIQUE (address),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO virtual_interfaces
+        SELECT created_at, updated_at, deleted_at, deleted, id, address,
+               network_id, instance_id, uuid
+        FROM virtual_interfaces_backup;
+
+    DROP TABLE virtual_interfaces_backup;
+
+COMMIT;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py b/cinder/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py
new file mode 100644
index 00000000000..e313fc7dee8
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/048_add_zone_name.py
@@ -0,0 +1,33 @@
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    zones = Table('zones', meta, autoload=True)
+    name = Column('name', String(255))
+    zones.create_column(name)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    zones = Table('zones', meta, autoload=True)
+
+    zones.drop_column('name')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py b/cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py
new file mode 100644
index 00000000000..c19d89e64cf
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py
@@ -0,0 +1,44 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, Table
+
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    progress = Column('progress', Integer())
+    try:
+        instances.create_column(progress)
+    except Exception:
+        LOG.error(_("progress column not added to instances table"))
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    instances.drop_column('progress')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py
new file mode 100644
index 00000000000..d4a2fcc13d7
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/050_add_disk_config_to_instances.py
@@ -0,0 +1,37 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, Integer, MetaData, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    managed_disk = Column("managed_disk", Boolean(create_constraint=False,
+                                                  name=None))
+    instances.create_column(managed_disk)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instances = Table('instances', meta, autoload=True)
+
+    instances.drop_column('managed_disk')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql
new file mode 100644
index 00000000000..8db7087bc08
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/050_sqlite_downgrade.sql
@@ -0,0 +1,207 @@
+BEGIN TRANSACTION;
+
+    CREATE TEMPORARY TABLE instances_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        internal_id INTEGER,
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        image_ref VARCHAR(255),
+        kernel_id VARCHAR(255),
+        ramdisk_id VARCHAR(255),
+        server_name VARCHAR(255),
+        launch_index INTEGER,
+        key_name VARCHAR(255),
+        key_data TEXT,
+        power_state INTEGER,
+        vm_state VARCHAR(255),
+        memory_mb INTEGER,
+        vcpus INTEGER,
+        local_gb INTEGER,
+        hostname VARCHAR(255),
+        host VARCHAR(255),
+        user_data TEXT,
+        reservation_id VARCHAR(255),
+        scheduled_at DATETIME,
+        launched_at DATETIME,
+        terminated_at DATETIME,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        availability_zone VARCHAR(255),
+        locked BOOLEAN,
+        os_type VARCHAR(255),
+        launched_on TEXT,
+        instance_type_id INTEGER,
+        vm_mode VARCHAR(255),
+        uuid VARCHAR(36),
+        architecture VARCHAR(255),
+        root_device_name VARCHAR(255),
+        access_ip_v4 VARCHAR(255),
+        access_ip_v6 VARCHAR(255),
+        config_drive VARCHAR(255),
+        task_state VARCHAR(255),
+        default_local_device VARCHAR(255),
+        default_swap_device VARCHAR(255),
+        progress INTEGER,
+        managed_disk BOOLEAN,
+        PRIMARY KEY (id),
+        CHECK (deleted IN (0, 1)),
+        CHECK (locked IN (0, 1)),
+        CHECK (managed_disk IN (0, 1))
+    );
+
+    INSERT INTO instances_backup
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               internal_id,
+               user_id,
+               project_id,
+               image_ref,
+               kernel_id,
+               ramdisk_id,
+               server_name,
+               launch_index,
+               key_name,
+               key_data,
+               power_state,
+               vm_state,
+               memory_mb,
+               vcpus,
+               local_gb,
+               hostname,
+               host,
+               user_data,
+               reservation_id,
+               scheduled_at,
+               launched_at,
+               terminated_at,
+               display_name,
+               display_description,
+               availability_zone,
+               locked,
+               os_type,
+               launched_on,
+               instance_type_id,
+               vm_mode,
+               uuid,
+               architecture,
+               root_device_name,
+               access_ip_v4,
+               access_ip_v6,
+               config_drive,
+               task_state,
+               default_local_device,
+               default_swap_device,
+               progress,
+               managed_disk
+        FROM instances;
+
+    DROP TABLE instances;
+
+    CREATE TABLE instances (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        internal_id INTEGER,
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        image_ref VARCHAR(255),
+        kernel_id VARCHAR(255),
+        ramdisk_id VARCHAR(255),
+        server_name VARCHAR(255),
+        launch_index INTEGER,
+        key_name VARCHAR(255),
+        key_data TEXT,
+        power_state INTEGER,
+        vm_state VARCHAR(255),
+        memory_mb INTEGER,
+        vcpus INTEGER,
+        local_gb INTEGER,
+        hostname VARCHAR(255),
+        host VARCHAR(255),
+        user_data TEXT,
+        reservation_id VARCHAR(255),
+        scheduled_at DATETIME,
+        launched_at DATETIME,
+        terminated_at DATETIME,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        availability_zone VARCHAR(255),
+        locked BOOLEAN,
+        os_type VARCHAR(255),
+        launched_on TEXT,
+        instance_type_id INTEGER,
+        vm_mode VARCHAR(255),
+        uuid VARCHAR(36),
+        architecture VARCHAR(255),
+        root_device_name VARCHAR(255),
+        access_ip_v4 VARCHAR(255),
+        access_ip_v6 VARCHAR(255),
+        config_drive VARCHAR(255),
+        task_state VARCHAR(255),
+        default_local_device VARCHAR(255),
+        default_swap_device VARCHAR(255),
+        progress INTEGER,
+        PRIMARY KEY (id),
+        CHECK (deleted IN (0, 1)),
+        CHECK (locked IN (0, 1))
+    );
+
+    INSERT INTO instances
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               internal_id,
+               user_id,
+               project_id,
+               image_ref,
+               kernel_id,
+               ramdisk_id,
+               server_name,
+               launch_index,
+               key_name,
+               key_data,
+               power_state,
+               vm_state,
+               memory_mb,
+               vcpus,
+               local_gb,
+               hostname,
+               host,
+               user_data,
+               reservation_id,
+               scheduled_at,
+               launched_at,
+               terminated_at,
+               display_name,
+               display_description,
+               availability_zone,
+               locked,
+               os_type,
+               launched_on,
+               instance_type_id,
+               vm_mode,
+               uuid,
+               architecture,
+               root_device_name,
+               access_ip_v4,
+               access_ip_v6,
+               config_drive,
+               task_state,
+               default_local_device,
+               default_swap_device,
+               progress
+        FROM instances_backup;
+
+    DROP TABLE instances_backup;
+COMMIT;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py b/cinder/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py
new file mode 100644
index 00000000000..a338319933b
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/051_add_vcpu_weight_to_instance_types.py
@@ -0,0 +1,34 @@
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instance_types = Table('instance_types', meta, autoload=True)
+
+    vcpu_weight = Column("vcpu_weight", Integer())
+    instance_types.create_column(vcpu_weight)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instance_types = Table('instance_types', meta, autoload=True)
+
+    instance_types.drop_column('vcpu_weight')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py b/cinder/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py
new file mode 100644
index 00000000000..c71b4eeefee
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/052_kill_export_devices.py
@@ -0,0 +1,65 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 University of Southern California
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
+from sqlalchemy import MetaData, Table
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    volumes = Table('volumes', meta, autoload=True)
+
+    #
+    # New Tables
+    #
+    export_devices = Table('export_devices', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('shelf_id', Integer()),
+        Column('blade_id', Integer()),
+        Column('volume_id',
+               Integer(),
+               ForeignKey('volumes.id'),
+               nullable=True),
+        )
+
+    try:
+        export_devices.create()
+    except Exception:
+        LOG.info(repr(export_devices))
+        LOG.exception('Exception while creating table')
+        raise
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    volumes = Table('volumes', meta, autoload=True)
+
+    export_devices = Table('export_devices', meta, autoload=True)
+
+    export_devices.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py b/cinder/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py
new file mode 100644
index 00000000000..8fe13991849
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/053_add_connection_info_to_block_device_mapping.py
@@ -0,0 +1,38 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, MetaData, Table, Text
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    table = Table('block_device_mapping', meta, autoload=True)
+
+    new_column = Column('connection_info', Text())
+
+    table.create_column(new_column)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    table = Table('block_device_mapping', meta, autoload=True)
+
+    table.c.connection_info.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql
new file mode 100644
index 00000000000..84439976367
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/053_sqlite_downgrade.sql
@@ -0,0 +1,87 @@
+BEGIN TRANSACTION;
+
+    CREATE TEMPORARY TABLE block_device_mapping_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        instance_id INTEGER NOT NULL,
+        device_name VARCHAR(255) NOT NULL,
+        delete_on_termination BOOLEAN,
+        virtual_name VARCHAR(255),
+        snapshot_id INTEGER,
+        volume_id INTEGER,
+        volume_size INTEGER,
+        no_device BOOLEAN,
+        connection_info TEXT,
+        PRIMARY KEY (id),
+        FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
+        CHECK (deleted IN (0, 1)),
+        CHECK (delete_on_termination IN (0, 1)),
+        CHECK (no_device IN (0, 1)),
+        FOREIGN KEY(volume_id) REFERENCES volumes (id),
+        FOREIGN KEY(instance_id) REFERENCES instances (id)
+    );
+
+    INSERT INTO block_device_mapping_backup
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               instance_id,
+               device_name,
+               delete_on_termination,
+               virtual_name,
+               snapshot_id,
+               volume_id,
+               volume_size,
+               no_device,
+               connection_info
+        FROM block_device_mapping;
+
+    DROP TABLE block_device_mapping;
+
+    CREATE TABLE block_device_mapping (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        instance_id INTEGER NOT NULL,
+        device_name VARCHAR(255) NOT NULL,
+        delete_on_termination BOOLEAN,
+        virtual_name VARCHAR(255),
+        snapshot_id INTEGER,
+        volume_id INTEGER,
+        volume_size INTEGER,
+        no_device BOOLEAN,
+        PRIMARY KEY (id),
+        FOREIGN KEY(snapshot_id) REFERENCES snapshots (id),
+        CHECK (deleted IN (0, 1)),
+        CHECK (delete_on_termination IN (0, 1)),
+        CHECK (no_device IN (0, 1)),
+        FOREIGN KEY(volume_id) REFERENCES volumes (id),
+        FOREIGN KEY(instance_id) REFERENCES instances (id)
+    );
+
+    INSERT INTO block_device_mapping
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               instance_id,
+               device_name,
+               delete_on_termination,
+               virtual_name,
+               snapshot_id,
+               volume_id,
+               volume_size,
+               no_device
+        FROM block_device_mapping_backup;
+
+    DROP TABLE block_device_mapping_backup;
+
+COMMIT;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py b/cinder/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py
new file mode 100644
index 00000000000..765f9cfc179
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py
@@ -0,0 +1,64 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 MORITA Kazutaka.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Table, MetaData
+from sqlalchemy import Integer, BigInteger, DateTime, Boolean, String
+
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    #
+    # New Tables
+    #
+    bw_cache = Table('bw_usage_cache', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('instance_id', Integer(), nullable=False),
+        Column('network_label',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('start_period', DateTime(timezone=False), nullable=False),
+        Column('last_refreshed', DateTime(timezone=False)),
+        Column('bw_in', BigInteger()),
+        Column('bw_out', BigInteger()))
+    try:
+        bw_cache.create()
+    except Exception:
+        LOG.info(repr(bw_cache))
+        LOG.exception('Exception while creating table')
+        meta.drop_all(tables=[bw_cache])
+        raise
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    meta = MetaData()
+    meta.bind = migrate_engine
+    bw_cache = Table('bw_usage_cache', meta, autoload=True)
+    bw_cache.drop()
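[Editorial note: migration 054 above pairs table creation with cleanup: if the create fails partway, it drops whatever was made before re-raising, so a rerun starts from a clean slate. A minimal sketch of that pattern with a hypothetical table:]

    # Sketch of 054's create-with-cleanup.
    from sqlalchemy import Column, Integer, MetaData, Table, create_engine

    meta = MetaData(bind=create_engine('sqlite:///:memory:'))  # placeholder
    demo = Table('demo', meta, Column('id', Integer, primary_key=True))

    try:
        demo.create()
    except Exception:
        meta.drop_all(tables=[demo])  # leave nothing half-created behind
        raise                         # then surface the original error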
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py b/cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py
new file mode 100644
index 00000000000..b110b6f208a
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py
@@ -0,0 +1,112 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import migrate
+import migrate.changeset
+from sqlalchemy import Column, Integer, String, MetaData, Table
+
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instance_types = Table('instance_types', meta, autoload=True)
+
+    string_column = Column('flavorid_str', String(255))
+
+    string_column.create(instance_types)
+
+    try:
+        # NOTE(bcwaldon): This catches a bug with python-migrate
+        # failing to add the unique constraint
+        try:
+            migrate.UniqueConstraint(string_column).create()
+        except migrate.changeset.NotSupportedError:
+            LOG.error("Failed to add unique constraint on flavorid")
+            pass
+
+        # NOTE(bcwaldon): this is a hack to preserve uniqueness constraint
+        # on existing 'name' column
+        try:
+            migrate.UniqueConstraint(instance_types.c.name).create()
+        except Exception:
+            pass
+
+        integer_column = instance_types.c.flavorid
+
+        instance_type_rows = list(instance_types.select().execute())
+        for instance_type in instance_type_rows:
+            flavorid_int = instance_type.flavorid
+            instance_types.update()\
+                          .where(integer_column == flavorid_int)\
+                          .values(flavorid_str=str(flavorid_int))\
+                          .execute()
+    except Exception:
+        string_column.drop()
+        raise
+
+    integer_column.alter(name='flavorid_int')
+    string_column.alter(name='flavorid')
+    integer_column.drop()
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instance_types = Table('instance_types', meta, autoload=True)
+
+    integer_column = Column('flavorid_int', Integer())
+
+    integer_column.create(instance_types)
+
+    try:
+        # NOTE(bcwaldon): This catches a bug with python-migrate
+        # failing to add the unique constraint
+        try:
+            migrate.UniqueConstraint(integer_column).create()
+        except migrate.changeset.NotSupportedError:
+            LOG.info("Failed to add unique constraint on flavorid")
+            pass
+
+        string_column = instance_types.c.flavorid
+
+        instance_types_rows = list(instance_types.select().execute())
+        for instance_type in instance_types_rows:
+            flavorid_str = instance_type.flavorid
+            try:
+                flavorid_int = int(instance_type.flavorid)
+            except ValueError:
+                msg = _('Could not cast flavorid to integer: %s. '
+                        'Set flavorid to an integer-like string to downgrade.')
+                LOG.error(msg % instance_type.flavorid)
+                raise
+
+            instance_types.update()\
+                          .where(string_column == flavorid_str)\
+                          .values(flavorid_int=flavorid_int)\
+                          .execute()
+    except Exception:
+        integer_column.drop()
+        raise
+
+    string_column.alter(name='flavorid_str')
+    integer_column.alter(name='flavorid')
+    string_column.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py b/cinder/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py
new file mode 100644
index 00000000000..aed52488a2e
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/056_add_s3_images.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    #
+    # New Tables
+    #
+    s3_images = Table('s3_images', meta,
+        Column('created_at',
+               DateTime(timezone=False)),
+        Column('updated_at',
+               DateTime(timezone=False)),
+        Column('deleted_at',
+               DateTime(timezone=False)),
+        Column('deleted',
+               Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(),
+               primary_key=True,
+               nullable=False,
+               autoincrement=True),
+        Column('uuid', String(36),
+               nullable=False))
+    try:
+        s3_images.create()
+    except Exception:
+        LOG.exception("Exception while creating table 's3_images'")
+        meta.drop_all(tables=[s3_images])
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    s3_images = Table('s3_images', meta, autoload=True)
+    s3_images.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py b/cinder/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py
new file mode 100644
index 00000000000..ea01fc80c6e
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/057_add_sm_driver_tables.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey
+from sqlalchemy import Integer, MetaData, String
+from sqlalchemy import Table
+
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    volumes = Table('volumes', meta, autoload=True)
+
+    #
+    # New Tables
+    #
+    flavors = Table('sm_flavors', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('label',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('description',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+    backend = Table('sm_backend_config', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), primary_key=True, nullable=False),
+        Column('flavor_id', Integer(), ForeignKey('sm_flavors.id'),
+               nullable=False),
+        Column('sr_uuid',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('sr_type',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('config_params',
+               String(length=2047,
+                      convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None,
+                      _warn_on_bytestring=False)),
+        )
+
+    sm_vol = Table('sm_volume', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('id', Integer(), ForeignKey('volumes.id'),
+               primary_key=True, nullable=False),
+        Column('backend_id', Integer(),
+               ForeignKey('sm_backend_config.id'),
+               nullable=False),
+        Column('vdi_uuid',
+               String(length=255, convert_unicode=False,
+                      assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+    for table in (flavors, backend, sm_vol):
+        try:
+            table.create()
+        except Exception:
+            LOG.info(repr(table))
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    volumes = Table('volumes', meta, autoload=True)
+
+    flavors = Table('sm_flavors', meta, autoload=True)
+    backend = Table('sm_backend_config', meta, autoload=True)
+    sm_vol = Table('sm_volume', meta, autoload=True)
+
+    for table in (flavors, backend, sm_vol):
+        try:
+            table.drop()
+        except Exception:
+            LOG.info(repr(table))
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py b/cinder/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py
new file mode 100644
index 00000000000..e12cabddd01
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/058_rename_managed_disk.py
@@ -0,0 +1,37 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instances = Table('instances', meta, autoload=True)
+
+    managed_disk = instances.c.managed_disk
+    managed_disk.alter(name='auto_disk_config')
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instances = Table('instances', meta, autoload=True)
+
+    image_ref_column = instances.c.auto_disk_config
+    image_ref_column.alter(name='managed_disk')
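[Editorial note: migration 055 above changes a column's type without an in-place ALTER: it adds a shadow string column, copies each value across, then swaps names and drops the integer original. The rename-swap half of that dance in isolation, on a hypothetical table; the migrate.changeset import is assumed to supply the column .create()/.alter() API, as in 055 itself:]

    # Sketch of 055's shadow-column type conversion.
    import migrate.changeset  # noqa -- column .create()/.alter(), as in 055
    from sqlalchemy import Column, MetaData, String, Table, create_engine

    engine = create_engine('sqlite:///example.db')  # placeholder URL
    meta = MetaData(bind=engine)
    things = Table('things', meta, autoload=True)   # has integer 'code'

    new_col = Column('code_str', String(255))
    new_col.create(things)
    for row in engine.execute(things.select()):
        engine.execute(things.update()
                       .where(things.c.code == row.code)
                       .values(code_str=str(row.code)))

    things.c.code.alter(name='code_int')  # keep the old data aside...
    new_col.alter(name='code')            # ...promote the string column...
    things.c.code_int.drop()              # ...then drop the integer original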
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py b/cinder/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py
new file mode 100644
index 00000000000..58f6d69e08d
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/059_split_rxtx_quota_into_network.py
@@ -0,0 +1,61 @@
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, Float, MetaData, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instance_types = Table('instance_types', meta, autoload=True)
+    networks = Table('networks', meta, autoload=True)
+
+    rxtx_base = Column('rxtx_base', Integer)
+    rxtx_factor = Column('rxtx_factor', Float, default=1)
+    instance_types.create_column(rxtx_factor)
+    networks.create_column(rxtx_base)
+
+    base = migrate_engine.execute("select min(rxtx_cap) as min_rxtx from "
+                                  "instance_types where rxtx_cap > 0").scalar()
+    base = base if base > 1 else 1
+    update_i_type_sql = ("update instance_types set rxtx_factor = rxtx_cap"
+                         "/%s where rxtx_cap > 0" % base)
+    migrate_engine.execute(update_i_type_sql)
+    migrate_engine.execute("update networks set rxtx_base = %s" % base)
+
+    instance_types.c.rxtx_quota.drop()
+    instance_types.c.rxtx_cap.drop()
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instance_types = Table('instance_types', meta, autoload=True)
+    networks = Table('networks', meta, autoload=True)
+
+    rxtx_quota = Column('rxtx_quota', Integer)
+    rxtx_cap = Column('rxtx_cap', Integer)
+    instance_types.create_column(rxtx_quota)
+    instance_types.create_column(rxtx_cap)
+
+    base = migrate_engine.execute("select min(rxtx_base) from networks "
+                                  "where rxtx_base > 0").scalar()
+    base = base if base > 1 else 1
+
+    update_i_type_sql = ("update instance_types set rxtx_cap = "
+                         "rxtx_factor * %s" % base)
+    migrate_engine.execute(update_i_type_sql)
+
+    instance_types.c.rxtx_factor.drop()
+    networks.c.rxtx_base.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql
new file mode 100644
index 00000000000..ecf45c599ba
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_downgrade.sql
@@ -0,0 +1,137 @@
+BEGIN TRANSACTION;
+
+    CREATE TEMPORARY TABLE instance_types_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        name VARCHAR(255),
+        id INTEGER NOT NULL,
+        memory_mb INTEGER NOT NULL,
+        vcpus INTEGER NOT NULL,
+        local_gb INTEGER NOT NULL,
+        swap INTEGER NOT NULL,
+        rxtx_quota INTEGER NOT NULL,
+        rxtx_cap INTEGER NOT NULL,
+        vcpu_weight INTEGER,
+        flavorid VARCHAR(255),
+        PRIMARY KEY (id),
+        CHECK (deleted IN (0, 1)),
+        UNIQUE (flavorid),
+        UNIQUE (name)
+    );
+
+    INSERT INTO instance_types_backup
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               name,
+               id,
+               memory_mb,
+               vcpus,
+               local_gb,
+               swap,
+               0 as rxtx_quota,
+               COALESCE(rxtx_factor, 1) * COALESCE ((SELECT MIN(rxtx_base)
+                                                     FROM networks
+                                                     WHERE rxtx_base > 0), 1)
+                                          as rxtx_cap,
+               vcpu_weight,
+               flavorid FROM instance_types;
+
+    DROP TABLE instance_types;
+
+    CREATE TABLE instance_types (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        name VARCHAR(255),
+        id INTEGER NOT NULL,
+        memory_mb INTEGER NOT NULL,
+        vcpus INTEGER NOT NULL,
+        local_gb INTEGER NOT NULL,
+        swap INTEGER NOT NULL,
+        rxtx_quota INTEGER NOT NULL,
+        rxtx_cap INTEGER NOT NULL,
+        vcpu_weight INTEGER,
+        flavorid VARCHAR(255),
+        PRIMARY KEY (id),
+        UNIQUE (flavorid),
+        CHECK (deleted IN (0, 1)),
+        UNIQUE (name)
+    );
+
+    INSERT INTO instance_types SELECT * FROM instance_types_backup;
+    DROP TABLE instance_types_backup;
+
+    CREATE TABLE networks_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        injected BOOLEAN,
+        cidr VARCHAR(255),
+        netmask VARCHAR(255),
+        bridge VARCHAR(255),
+        gateway VARCHAR(255),
+        broadcast VARCHAR(255),
+        dns1 VARCHAR(255),
+        vlan INTEGER,
+        vpn_public_address VARCHAR(255),
+        vpn_public_port INTEGER,
+        vpn_private_address VARCHAR(255),
+        dhcp_start VARCHAR(255),
+        project_id VARCHAR(255),
+        host VARCHAR(255),
+        cidr_v6 VARCHAR(255),
+        gateway_v6 VARCHAR(255),
+        label VARCHAR(255),
+        netmask_v6 VARCHAR(255),
+        bridge_interface VARCHAR(255),
+        multi_host BOOLEAN,
+        dns2 VARCHAR(255),
+        uuid VARCHAR(36),
+        priority INTEGER,
+        PRIMARY KEY (id),
+        CHECK (deleted IN (0, 1)),
+        CHECK (injected IN (0, 1)),
+        CHECK (multi_host IN (0, 1))
+    );
+
+    INSERT INTO networks_backup
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               injected,
+               cidr,
+               netmask,
+               bridge,
+               gateway,
+               broadcast,
+               dns1,
+               vlan,
+               vpn_public_address,
+               vpn_public_port,
+               vpn_private_address,
+               dhcp_start,
+               project_id,
+               host,
+               cidr_v6,
+               gateway_v6,
+               label,
+               netmask_v6,
+               bridge_interface,
+               multi_host,
+               dns2,
+               uuid,
+               priority
+        FROM networks;
+
+    DROP TABLE networks;
+    ALTER TABLE networks_backup RENAME TO networks;
+COMMIT;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_upgrade.sql
new file mode 100644
index 00000000000..ba7729aedef
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/059_sqlite_upgrade.sql
@@ -0,0 +1,87 @@
+BEGIN TRANSACTION;
+
+    CREATE TEMPORARY TABLE instance_types_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        name VARCHAR(255),
+        id INTEGER NOT NULL,
+        memory_mb INTEGER NOT NULL,
+        vcpus INTEGER NOT NULL,
+        local_gb INTEGER NOT NULL,
+        swap INTEGER NOT NULL,
+        rxtx_factor FLOAT,
+        vcpu_weight INTEGER,
+        flavorid VARCHAR(255),
+        PRIMARY KEY (id),
+        UNIQUE (flavorid),
+        CHECK (deleted IN (0, 1)),
+        UNIQUE (name)
+    );
+
+    INSERT INTO instance_types_backup
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               name,
+               id,
+               memory_mb,
+               vcpus,
+               local_gb,
+               swap,
+               COALESCE(rxtx_cap, 1) / COALESCE ((SELECT MIN(rxtx_cap)
+                                                  FROM instance_types
+                                                  WHERE rxtx_cap > 0), 1)
+                                       as rxtx_cap,
+               vcpu_weight,
+               flavorid
+        FROM instance_types;
+
+    ALTER TABLE networks ADD COLUMN rxtx_base INTEGER DEFAULT 1;
+
+    UPDATE networks SET rxtx_base = COALESCE((SELECT MIN(rxtx_cap)
+                                              FROM instance_types
+                                              WHERE rxtx_cap>0), 1);
+
+    DROP TABLE instance_types;
+
+    CREATE TABLE instance_types (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        name VARCHAR(255),
+        id INTEGER NOT NULL,
+        memory_mb INTEGER NOT NULL,
+        vcpus INTEGER NOT NULL,
+        local_gb INTEGER NOT NULL,
+        swap INTEGER NOT NULL,
+        rxtx_factor FLOAT,
+        vcpu_weight INTEGER,
+        flavorid VARCHAR(255),
+        PRIMARY KEY (id),
+        UNIQUE (flavorid),
+        CHECK (deleted IN (0, 1)),
+        UNIQUE (name)
+    );
+
+    INSERT INTO instance_types
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               name,
+               id,
+               memory_mb,
+               vcpus,
+               local_gb,
+               swap,
+               rxtx_factor,
+               vcpu_weight,
+               flavorid
+        FROM instance_types_backup;
+
+    DROP TABLE instance_types_backup;
+
+COMMIT;
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table +from migrate import ForeignKeyConstraint + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + networks = Table('networks', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + + try: + fkey_name = list(vifs.c.network_id.foreign_keys)[0].constraint.name + ForeignKeyConstraint(columns=[vifs.c.network_id], + refcolumns=[networks.c.id], + name=fkey_name).drop() + + except Exception: + LOG.error(_("foreign key constraint couldn't be removed")) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + networks = Table('networks', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + + try: + ForeignKeyConstraint(columns=[vifs.c.network_id], + refcolumns=[networks.c.id]).create() + except Exception: + LOG.error(_("foreign key constraint couldn't be added")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql new file mode 100644 index 00000000000..2c0919f1dd9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_downgrade.sql @@ -0,0 +1,45 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE virtual_interfaces_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id) + ); + + INSERT INTO virtual_interfaces_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces; + + DROP TABLE virtual_interfaces; + + CREATE TABLE virtual_interfaces ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_upgrade.sql new file mode 100644 index 00000000000..fd49ea4f524 --- /dev/null +++ 
b/cinder/db/sqlalchemy/migrate_repo/versions/060_sqlite_upgrade.sql @@ -0,0 +1,44 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE virtual_interfaces_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id) + ); + + INSERT INTO virtual_interfaces_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces; + + DROP TABLE virtual_interfaces; + + CREATE TABLE virtual_interfaces ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py b/cinder/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py new file mode 100644 index 00000000000..1a369bffec3 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/061_add_index_to_instance_uuid.py @@ -0,0 +1,29 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Index, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + Index('uuid', instances.c.uuid, unique=True).create(migrate_engine) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + Index('uuid', instances.c.uuid, unique=True).drop(migrate_engine) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py new file mode 100644 index 00000000000..f2b0e8a742d --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py @@ -0,0 +1,70 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
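A note on the timestamp defaults in the table created below: SQLAlchemy evaluates a callable default once per INSERT, but a plain value default is fixed at the moment the Column is defined. Writing default=utils.utcnow() would therefore stamp every future row with the time the migration module was imported; passing the function itself gives each row a fresh timestamp. A self-contained sketch of the difference (illustrative, not from the patch):

    from datetime import datetime
    from sqlalchemy import Column, DateTime

    # Evaluated once, at definition time -- every row gets this same value:
    frozen = Column('created_at', DateTime, default=datetime.utcnow())
    # Evaluated per INSERT -- each row gets the current time:
    fresh = Column('created_at', DateTime, default=datetime.utcnow)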
+
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey
+from sqlalchemy import Integer, MetaData, String
+from sqlalchemy import Table, Text
+
+from cinder import log as logging
+from cinder import utils
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    instances = Table('instances', meta, autoload=True)
+
+    #
+    # New Tables
+    #
+    instance_info_caches = Table('instance_info_caches', meta,
+            Column('created_at', DateTime(timezone=False),
+                   default=utils.utcnow),
+            Column('updated_at', DateTime(timezone=False),
+                   onupdate=utils.utcnow),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None)),
+            Column('id', Integer(), primary_key=True),
+            Column('network_info', Text()),
+            Column('instance_id', String(36),
+                   ForeignKey('instances.uuid'),
+                   nullable=False,
+                   unique=True),
+            mysql_engine='InnoDB')
+    # create instance_info_caches table
+    try:
+        instance_info_caches.create()
+    except Exception:
+        LOG.error(_("Table |%s| not created!"), repr(instance_info_caches))
+        raise
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # load tables for fk
+    instances = Table('instances', meta, autoload=True)
+
+    instance_info_caches = Table('instance_info_caches', meta, autoload=True)
+    try:
+        instance_info_caches.drop()
+    except Exception:
+        LOG.error(_("instance_info_caches table not dropped"))
+        raise
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py
new file mode 100644
index 00000000000..be4561791cc
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/063_add_instance_faults_table.py
@@ -0,0 +1,60 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer, ForeignKey
+from sqlalchemy import MetaData, String, Table, Text
+from cinder import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta = MetaData()
+    meta.bind = migrate_engine
+    #
+    # New Tables
+    #
+    instance_faults = Table('instance_faults', meta,
+            Column('created_at', DateTime(timezone=False)),
+            Column('updated_at', DateTime(timezone=False)),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None),
+                   default=False),
+            Column('id', Integer(), primary_key=True, nullable=False),
+            Column('instance_uuid', String(36), ForeignKey('instances.uuid')),
+            Column('code', Integer(), nullable=False),
+            Column('message',
+                   String(length=255, convert_unicode=False,
+                          assert_unicode=None,
+                          unicode_error=None, _warn_on_bytestring=False)),
+            Column('details',
+                   Text(length=None, convert_unicode=False,
+                        assert_unicode=None,
+                        unicode_error=None, _warn_on_bytestring=False)),
+            )
+    try:
+        instance_faults.create()
+    except Exception:
+        LOG.exception(repr(instance_faults))
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instance_faults = Table('instance_faults', meta, autoload=True)
+    instance_faults.drop()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py b/cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py
new file mode 100644
index 00000000000..bed9151666d
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py
@@ -0,0 +1,80 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
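The body of migration 064, just below, shows the compensation pattern these scripts use in place of transactional DDL (which MySQL of this era does not provide): create the new column, attempt the data backfill, and explicitly drop the column again before re-raising if the backfill fails. In outline (a sketch with a hypothetical backfill helper, not code from the patch):

    uuid_column = Column('instance_uuid', String(36))
    uuid_column.create(instance_actions)
    try:
        backfill_uuids()   # hypothetical; 064 uses a correlated SELECT here
    except Exception:
        uuid_column.drop()  # undo the half-applied schema change
        raise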
+ +from sqlalchemy import select, Column, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from migrate import ForeignKeyConstraint + +from cinder import log as logging + + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + instance_actions = Table('instance_actions', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + uuid_column = Column('instance_uuid', String(36)) + uuid_column.create(instance_actions) + + try: + instance_actions.update().values( + instance_uuid=select( + [instances.c.uuid], + instances.c.id == instance_actions.c.instance_id) + ).execute() + except Exception: + uuid_column.drop() + raise + + if not dialect.startswith('sqlite'): + fkeys = list(instance_actions.c.instance_id.foreign_keys) + if fkeys: + try: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[instance_actions.c.instance_id], + refcolumns=[instances.c.id], + name=fkey_name).drop() + except Exception: + LOG.error(_("foreign key constraint couldn't be removed")) + raise + + instance_actions.c.instance_id.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instance_actions = Table('instance_actions', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + id_column = Column('instance_id', Integer, ForeignKey('instances.id')) + id_column.create(instance_actions) + + try: + instance_actions.update().values( + instance_id=select( + [instances.c.id], + instances.c.uuid == instance_actions.c.instance_uuid) + ).execute() + except Exception: + id_column.drop() + raise + + instance_actions.c.instance_uuid.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py b/cinder/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py new file mode 100644 index 00000000000..9b27f39dcc6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/065_add_index_to_instance_project_id.py @@ -0,0 +1,31 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
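Several of these migrations (060 and 064 above, 070 below) open with a dialect check and return early on SQLite:

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        return

That works because sqlalchemy-migrate will run a hand-written NNN_sqlite_upgrade.sql / NNN_sqlite_downgrade.sql placed next to the Python script when the dialect matches; that is why this patch pairs those .sql files with the constraint-manipulating migrations, SQLite's ALTER TABLE support being too limited for the generic code path.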
+ +from sqlalchemy import Index, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + index = Index('project_id', instances.c.project_id) + index.create(migrate_engine) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + index = Index('project_id', instances.c.project_id) + index.drop(migrate_engine) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py new file mode 100644 index 00000000000..b2df1bbe25b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/066_preload_instance_info_cache_table.py @@ -0,0 +1,31 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from sqlalchemy import select, MetaData, Table + +from cinder import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + pass + + +def downgrade(migrate_engine): + pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py b/cinder/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py new file mode 100644 index 00000000000..61adb8fa423 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/067_add_pool_and_interface_to_floating_ip.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
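The 067_sqlite_downgrade.sql accompanying this migration (next hunk) illustrates the standard SQLite workaround of the time: with no ALTER TABLE ... DROP COLUMN available, removing a column means rebuilding the table around it. A runnable toy version of the recipe, assuming an in-memory database (illustrative only):

    import sqlalchemy

    engine = sqlalchemy.create_engine('sqlite://')
    conn = engine.connect()
    conn.execute('CREATE TABLE t (id INTEGER PRIMARY KEY, keep TEXT, drop_me TEXT)')
    # 1. copy the surviving columns aside, 2. drop the table, 3. recreate it
    # without the column (restating any constraints), 4. copy back, 5. clean up
    conn.execute('CREATE TEMPORARY TABLE t_backup AS SELECT id, keep FROM t')
    conn.execute('DROP TABLE t')
    conn.execute('CREATE TABLE t (id INTEGER PRIMARY KEY, keep TEXT)')
    conn.execute('INSERT INTO t SELECT id, keep FROM t_backup')
    conn.execute('DROP TABLE t_backup')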
+ +from sqlalchemy import Column, MetaData, String, Table + +from cinder import flags + +FLAGS = flags.FLAGS + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + table = Table('floating_ips', meta, autoload=True) + + pool_column = Column('pool', String(255)) + interface_column = Column('interface', String(255)) + table.create_column(pool_column) + table.create_column(interface_column) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + table = Table('floating_ips', meta, autoload=True) + table.c.pool.drop() + table.c.interface.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql new file mode 100644 index 00000000000..3cd12cbdc2f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/067_sqlite_downgrade.sql @@ -0,0 +1,69 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE floating_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host, + auto_assigned, + pool, + interface + FROM floating_ips; + + DROP TABLE floating_ips; + + CREATE TABLE floating_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO floating_ips + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + address, + fixed_ip_id, + project_id, + host, + auto_assigned + FROM floating_ips_backup; + + DROP TABLE floating_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py b/cinder/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py new file mode 100644 index 00000000000..a65aff8b426 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/068_add_instance_attribute.py @@ -0,0 +1,36 @@ +# Copyright 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
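Worth noting before the next migration: the default=True/default=False arguments it passes are client-side SQLAlchemy defaults, applied when an INSERT goes through the engine; they do not become a DEFAULT clause in the table DDL (that would require server_default). A sketch of the distinction (illustrative, not from the patch):

    from sqlalchemy import Boolean, Column
    # applied by SQLAlchemy at INSERT time; no DDL DEFAULT is emitted:
    client_side = Column('shutdown_terminate', Boolean(), default=True)
    # baked into the CREATE/ALTER TABLE statement itself:
    server_side = Column('shutdown_terminate', Boolean(), server_default='1')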
+ +from sqlalchemy import Boolean +from sqlalchemy import Column, MetaData, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + shutdown_terminate = Column( + 'shutdown_terminate', Boolean(), default=True) + disable_terminate = Column( + 'disable_terminate', Boolean(), default=False) + instances.create_column(shutdown_terminate) + instances.create_column(disable_terminate) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + instances.drop_column('shutdown_terminate') + instances.drop_column('disable_terminate') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql new file mode 100644 index 00000000000..a7700f6fab7 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/068_sqlite_downgrade.sql @@ -0,0 +1,219 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + auto_disk_config BOOLEAN, + shutdown_terminate BOOLEAN, + disable_terminate BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (auto_disk_config IN (0, 1)), + CHECK (shutdown_terminate IN (0, 1)), + CHECK (disable_terminate IN (0, 1)) + ); + + INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + auto_disk_config, + shutdown_terminate, + disable_terminate + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + 
ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + auto_disk_config BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (auto_disk_config IN (0, 1)) + ); + + CREATE INDEX project_id ON instances (project_id); + CREATE UNIQUE INDEX uuid ON instances (uuid); + + INSERT INTO instances + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + auto_disk_config + FROM instances_backup; + + DROP TABLE instances_backup; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py b/cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py new file mode 100644 index 00000000000..e9984be28b8 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py @@ -0,0 +1,50 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + disk_available_least = Column('disk_available_least', Integer(), default=0) + compute_nodes = Table('compute_nodes', meta, autoload=True) + # Add disk_available_least column to compute_nodes table. + # Thinking about qcow2 image support, both compressed and virtual disk size + # has to be considered. 
+ # disk_available stores "total disk size - used disk(compressed disk size)" + # while disk_available_least stores + # "total disk size - used disk(virtual disk size)". + # virtual disk size is used for kvm block migration. + try: + compute_nodes.create_column(disk_available_least) + except Exception: + LOG.error(_("progress column not added to compute_nodes table")) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + compute_nodes = Table('compute_nodes', meta, autoload=True) + compute_nodes.drop_column('disk_available_least') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql new file mode 100644 index 00000000000..5837603c866 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_downgrade.sql @@ -0,0 +1,103 @@ +BEGIN TRANSACTION; + CREATE TABLE fixed_ips_backup ( + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + host VARCHAR(255), + PRIMARY KEY (id) + ); + + CREATE TABLE floating_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id) + ); + + INSERT INTO fixed_ips_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + virtual_interface_id, network_id, instance_id, allocated, + leased, reserved, host + FROM fixed_ips; + + INSERT INTO floating_ips_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + fixed_ip_id, project_id, host, auto_assigned, pool, + interface + FROM floating_ips; + + DROP TABLE fixed_ips; + DROP TABLE floating_ips; + + CREATE TABLE fixed_ips ( + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + host VARCHAR(255), + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + CREATE TABLE floating_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(fixed_ip_id) REFERENCES fixed_ips (id) + ); + + INSERT INTO fixed_ips + SELECT created_at, updated_at, deleted_at, deleted, id, address, + virtual_interface_id, network_id, instance_id, allocated, + leased, reserved, host + FROM fixed_ips_backup; + + INSERT INTO floating_ips + SELECT created_at, updated_at, deleted_at, deleted, id, address, + fixed_ip_id, project_id, host, auto_assigned, pool, + interface + FROM floating_ips_backup; + + DROP TABLE fixed_ips_backup; + DROP TABLE 
floating_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_upgrade.sql new file mode 100644 index 00000000000..2b6f7c39a40 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/070_sqlite_upgrade.sql @@ -0,0 +1,99 @@ +BEGIN TRANSACTION; + CREATE TABLE fixed_ips_backup ( + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + host VARCHAR(255), + PRIMARY KEY (id) + ); + + CREATE TABLE floating_ips_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id) + ); + + INSERT INTO fixed_ips_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + virtual_interface_id, network_id, instance_id, allocated, + leased, reserved, host + FROM fixed_ips; + + INSERT INTO floating_ips_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + fixed_ip_id, project_id, host, auto_assigned, pool, + interface + FROM floating_ips; + + DROP TABLE fixed_ips; + DROP TABLE floating_ips; + + CREATE TABLE fixed_ips ( + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + host VARCHAR(255), + PRIMARY KEY (id) + ); + + CREATE TABLE floating_ips ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + fixed_ip_id INTEGER, + project_id VARCHAR(255), + host VARCHAR(255), + auto_assigned BOOLEAN, + pool VARCHAR(255), + interface VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO fixed_ips + SELECT created_at, updated_at, deleted_at, deleted, id, address, + virtual_interface_id, network_id, instance_id, allocated, + leased, reserved, host + FROM fixed_ips_backup; + + INSERT INTO floating_ips + SELECT created_at, updated_at, deleted_at, deleted, id, address, + fixed_ip_id, project_id, host, auto_assigned, pool, + interface + FROM floating_ips_backup; + + DROP TABLE fixed_ips_backup; + DROP TABLE floating_ips_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py b/cinder/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py new file mode 100644 index 00000000000..0316194b326 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/070_untie_nova_network_models.py @@ -0,0 +1,100 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table +from migrate import ForeignKeyConstraint + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + instances = Table('instances', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + floating_ips = Table('floating_ips', meta, autoload=True) + + try: + fkeys = list(fixed_ips.c.network_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[fixed_ips.c.network_id], + refcolumns=[networks.c.id], + name=fkey_name).drop() + + fkeys = list(fixed_ips.c.virtual_interface_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id], + refcolumns=[vifs.c.id], + name=fkey_name).drop() + + fkeys = list(fixed_ips.c.instance_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[fixed_ips.c.instance_id], + refcolumns=[instances.c.id], + name=fkey_name).drop() + + fkeys = list(floating_ips.c.fixed_ip_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[floating_ips.c.fixed_ip_id], + refcolumns=[fixed_ips.c.id], + name=fkey_name).drop() + + except Exception: + LOG.error(_("foreign key constraint couldn't be removed")) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + instances = Table('instances', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + floating_ips = Table('floating_ips', meta, autoload=True) + + try: + ForeignKeyConstraint(columns=[fixed_ips.c.network_id], + refcolumns=[networks.c.id]).create() + + ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id], + refcolumns=[vifs.c.id]).create() + + ForeignKeyConstraint(columns=[fixed_ips.c.instance_id], + refcolumns=[instances.c.id]).create() + + ForeignKeyConstraint(columns=[floating_ips.c.fixed_ip_id], + refcolumns=[fixed_ips.c.id]).create() + except Exception: + LOG.error(_("foreign key constraint couldn't be added")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py b/cinder/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py new file mode 100644 index 00000000000..d85c3bad0a4 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/071_add_host_aggregate_tables.py @@ -0,0 +1,108 @@ +# Copyright (c) 2011 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, String, DateTime, Integer +from sqlalchemy import MetaData, Column, ForeignKey, Table + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + # + # New Tables + # + aggregates = Table('aggregates', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), + primary_key=True, nullable=False, autoincrement=True), + Column('name', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('operational_state', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + ) + + hosts = Table('aggregate_hosts', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('host', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('aggregate_id', Integer(), ForeignKey('aggregates.id'), + nullable=False), + ) + + metadata = Table('aggregate_metadata', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('aggregate_id', + Integer(), + ForeignKey('aggregates.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('value', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False)) + tables = (aggregates, hosts, metadata) + for table in tables: + try: + table.create() + except Exception: + LOG.exception(repr(table)) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + aggregates = Table('aggregates', meta, autoload=True) + hosts = Table('aggregate_hosts', meta, autoload=True) + metadata = Table('aggregate_metadata', meta, autoload=True) + # table order matters, don't change + for table in (hosts, metadata, aggregates): + try: + table.drop() + except Exception: + LOG.exception(repr(table)) diff --git 
a/cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py b/cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py new file mode 100644 index 00000000000..5c1644d579b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py @@ -0,0 +1,77 @@ +# Copyright 2012 Andrew Bogott for The Wikimedia Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import MetaData, String, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + projects = Table('projects', meta, autoload=True) + + # + # New Tables + # + dns_domains = Table('dns_domains', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('domain', + String(length=512, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, nullable=False), + Column('scope', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('availability_zone', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('projects.id')) + ) + # create dns_domains table + try: + dns_domains.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(dns_domains)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # load tables for fk + projects = Table('projects', meta, autoload=True) + + dns_domains = Table('dns_domains', meta, autoload=True) + try: + dns_domains.drop() + except Exception: + LOG.error(_("dns_domains table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql new file mode 100644 index 00000000000..d11e8214788 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/072_mysql_upgrade.sql @@ -0,0 +1,13 @@ +CREATE TABLE dns_domains ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + domain VARCHAR(512) CHARACTER SET latin1 NOT NULL, + scope VARCHAR(255), + availability_zone VARCHAR(255), + project_id VARCHAR(255), + PRIMARY KEY (domain), + CHECK (deleted IN (0, 1)), + FOREIGN KEY(project_id) REFERENCES projects (id) +); diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py b/cinder/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py new file mode 100644 index 00000000000..31ed41581ed --- /dev/null +++ 
b/cinder/db/sqlalchemy/migrate_repo/versions/073_add_capacity.py @@ -0,0 +1,49 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table + +from cinder import log as logging + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta = MetaData() + meta.bind = migrate_engine + compute_nodes = Table('compute_nodes', meta, autoload=True) + + # + # New Columns + # + new_columns = [ + Column('free_ram_mb', Integer()), + Column('free_disk_gb', Integer()), + Column('current_workload', Integer()), + Column('running_vms', Integer()), + ] + for column in new_columns: + compute_nodes.create_column(column) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + compute_nodes = Table('compute_nodes', meta, autoload=True) + + for column in ('free_ram_mb', + 'free_disk_gb', + 'current_workload', + 'running_vms'): + compute_nodes.drop_column(column) diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py b/cinder/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py new file mode 100644 index 00000000000..a371aa6963f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from sqlalchemy import select, Column, Integer, MetaData, Table
+
+from cinder import exception
+from cinder import flags
+
+FLAGS = flags.FLAGS
+
+
+def upgrade_libvirt(instances, instance_types):
+    # Update instance_types first
+    tiny = None
+    for inst_type in instance_types.select().execute():
+        if inst_type['name'] == 'm1.tiny':
+            tiny = inst_type['id']
+            root_gb = 0
+        else:
+            root_gb = 10
+
+        instance_types.update()\
+                      .values(root_gb=root_gb,
+                              ephemeral_gb=inst_type['local_gb'])\
+                      .where(instance_types.c.id == inst_type['id'])\
+                      .execute()
+
+    # then update instances, following the same pattern
+    instances.update()\
+             .values(root_gb=10,
+                     ephemeral_gb=instances.c.local_gb)\
+             .execute()
+
+    if tiny is not None:
+        instances.update()\
+                 .values(root_gb=0,
+                         ephemeral_gb=instances.c.local_gb)\
+                 .where(instances.c.instance_type_id == tiny)\
+                 .execute()
+
+
+def upgrade_other(instances, instance_types):
+    for table in (instances, instance_types):
+        table.update().values(root_gb=table.c.local_gb,
+                              ephemeral_gb=0).execute()
+
+
+def check_instance_presence(migrate_engine, instances_table):
+    result = migrate_engine.execute(instances_table.select().limit(1))
+    return result.fetchone() is not None
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instances = Table('instances', meta, autoload=True)
+
+    data_present = check_instance_presence(migrate_engine, instances)
+
+    if data_present and not FLAGS.connection_type:
+        msg = ("Found instance records in database. You must specify "
+               "connection_type to run this migration")
+        raise exception.Error(msg)
+
+    instance_types = Table('instance_types', meta, autoload=True)
+
+    for table in (instances, instance_types):
+        root_gb = Column('root_gb', Integer)
+        root_gb.create(table)
+        ephemeral_gb = Column('ephemeral_gb', Integer)
+        ephemeral_gb.create(table)
+
+    # Since this migration is part of the work to get all drivers
+    # working the same way, we need to treat the new root_gb and
+    # ephemeral_gb columns differently depending on how the
+    # driver implementation used to behave.
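Condensing the branch below: under libvirt the old local_gb column really described the ephemeral disk (the root disk was a fixed 10 GB image, or 0 for m1.tiny), while every other driver used local_gb as the root disk and had no ephemeral disk. As a hypothetical summary helper (a sketch, not code from the patch):

    def split_local_gb(connection_type, flavor_name, local_gb):
        """Return (root_gb, ephemeral_gb) the way migration 074 assigns them."""
        if connection_type == 'libvirt':
            return (0 if flavor_name == 'm1.tiny' else 10), local_gb
        return local_gb, 0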
+ if FLAGS.connection_type == 'libvirt': + upgrade_libvirt(instances, instance_types) + else: + upgrade_other(instances, instance_types) + + default_local_device = instances.c.default_local_device + default_local_device.alter(name='default_ephemeral_device') + + for table in (instances, instance_types): + table.drop_column('local_gb') + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + instance_types = Table('instance_types', meta, autoload=True) + + for table in (instances, instance_types): + local_gb = Column('local_gb', Integer) + local_gb.create(table) + + try: + for table in (instances, instance_types): + if FLAGS.connection_type == 'libvirt': + column = table.c.ephemeral_gb + else: + column = table.c.root_gb + table.update().values(local_gb=column).execute() + except Exception: + for table in (instances, instance_types): + table.drop_column('local_gb') + raise + + default_ephemeral_device = instances.c.default_ephemeral_device + default_ephemeral_device.alter(name='default_local_device') + + for table in (instances, instance_types): + table.drop_column('root_gb') + table.drop_column('ephemeral_gb') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/074_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/074_sqlite_upgrade.sql new file mode 100644 index 00000000000..e2708111b4c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/074_sqlite_upgrade.sql @@ -0,0 +1,313 @@ +-- sqlalchemy-migrate is surprisingly broken when it comes to migrations +-- for sqlite. As a result, we have to do much of the work manually here + +BEGIN TRANSACTION; + -- make backup of instance_types + CREATE TEMPORARY TABLE instance_types_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + memory_mb INTEGER NOT NULL, + vcpus INTEGER NOT NULL, + local_gb INTEGER NOT NULL, + swap INTEGER NOT NULL, + rxtx_factor FLOAT, + vcpu_weight INTEGER, + flavorid VARCHAR(255), + PRIMARY KEY (id), + UNIQUE (flavorid), + CHECK (deleted IN (0, 1)), + UNIQUE (name) + ); + + INSERT INTO instance_types_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + name, + id, + memory_mb, + vcpus, + local_gb, + swap, + rxtx_factor, + vcpu_weight, + flavorid + FROM instance_types; + + DROP TABLE instance_types; + + CREATE TABLE instance_types ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + memory_mb INTEGER NOT NULL, + vcpus INTEGER NOT NULL, + root_gb INTEGER NOT NULL, + ephemeral_gb INTEGER NOT NULL, + swap INTEGER NOT NULL, + rxtx_factor FLOAT, + vcpu_weight INTEGER, + flavorid VARCHAR(255), + PRIMARY KEY (id), + UNIQUE (flavorid), + CHECK (deleted IN (0, 1)), + UNIQUE (name) + ); + + -- copy from backup to new table with root_gb set to local_gb and + -- ephmeral_gb set to 0 + INSERT INTO instance_types + SELECT created_at, + updated_at, + deleted_at, + deleted, + name, + id, + memory_mb, + vcpus, + local_gb, + 0, + swap, + rxtx_factor, + vcpu_weight, + flavorid + FROM instance_types_backup; + + DROP TABLE instance_types_backup; + + -- make backup of instances + CREATE TEMPORARY TABLE instances_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), 
+ ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + local_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_local_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + auto_disk_config BOOLEAN, + shutdown_terminate BOOLEAN, + disable_terminate BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (auto_disk_config IN (0, 1)), + CHECK (shutdown_terminate IN (0, 1)), + CHECK (disable_terminate IN (0, 1)) + ); + + INSERT INTO instances_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + internal_id, + user_id, + project_id, + image_ref, + kernel_id, + ramdisk_id, + server_name, + launch_index, + key_name, + key_data, + power_state, + vm_state, + memory_mb, + vcpus, + local_gb, + hostname, + host, + user_data, + reservation_id, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + availability_zone, + locked, + os_type, + launched_on, + instance_type_id, + vm_mode, + uuid, + architecture, + root_device_name, + access_ip_v4, + access_ip_v6, + config_drive, + task_state, + default_local_device, + default_swap_device, + progress, + auto_disk_config, + shutdown_terminate, + disable_terminate + FROM instances; + + DROP TABLE instances; + + CREATE TABLE instances ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + internal_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + image_ref VARCHAR(255), + kernel_id VARCHAR(255), + ramdisk_id VARCHAR(255), + server_name VARCHAR(255), + launch_index INTEGER, + key_name VARCHAR(255), + key_data TEXT, + power_state INTEGER, + vm_state VARCHAR(255), + memory_mb INTEGER, + vcpus INTEGER, + root_gb INTEGER, + ephemeral_gb INTEGER, + hostname VARCHAR(255), + host VARCHAR(255), + user_data TEXT, + reservation_id VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + availability_zone VARCHAR(255), + locked BOOLEAN, + os_type VARCHAR(255), + launched_on TEXT, + instance_type_id INTEGER, + vm_mode VARCHAR(255), + uuid VARCHAR(36), + architecture VARCHAR(255), + root_device_name VARCHAR(255), + access_ip_v4 VARCHAR(255), + access_ip_v6 VARCHAR(255), + config_drive VARCHAR(255), + task_state VARCHAR(255), + default_ephemeral_device VARCHAR(255), + default_swap_device VARCHAR(255), + progress INTEGER, + auto_disk_config BOOLEAN, + shutdown_terminate BOOLEAN, + disable_terminate BOOLEAN, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)), + CHECK (locked IN (0, 1)), + CHECK (auto_disk_config IN (0, 1)), + CHECK (shutdown_terminate IN (0, 1)), + CHECK (disable_terminate IN (0, 1)) + ); + + CREATE INDEX project_id ON instances (project_id); + CREATE UNIQUE INDEX uuid ON instances 
(uuid);
+
+    -- copy from backup to new table with root_gb set to local_gb and
+    -- ephemeral_gb set to 0
+    INSERT INTO instances
+        SELECT created_at,
+               updated_at,
+               deleted_at,
+               deleted,
+               id,
+               internal_id,
+               user_id,
+               project_id,
+               image_ref,
+               kernel_id,
+               ramdisk_id,
+               server_name,
+               launch_index,
+               key_name,
+               key_data,
+               power_state,
+               vm_state,
+               memory_mb,
+               vcpus,
+               local_gb,
+               0,
+               hostname,
+               host,
+               user_data,
+               reservation_id,
+               scheduled_at,
+               launched_at,
+               terminated_at,
+               display_name,
+               display_description,
+               availability_zone,
+               locked,
+               os_type,
+               launched_on,
+               instance_type_id,
+               vm_mode,
+               uuid,
+               architecture,
+               root_device_name,
+               access_ip_v4,
+               access_ip_v6,
+               config_drive,
+               task_state,
+               default_local_device,
+               default_swap_device,
+               progress,
+               auto_disk_config,
+               shutdown_terminate,
+               disable_terminate
+        FROM instances_backup;
+
+    DROP TABLE instances_backup;
+COMMIT;
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py b/cinder/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py
new file mode 100644
index 00000000000..3d26204f069
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/075_convert_bw_usage_to_store_network_id.py
@@ -0,0 +1,97 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
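+# NOTE: this migration replaces bw_usage_cache.network_label with a mac
+# column resolved through virtual_interfaces. The scalar-subquery UPDATE
+# built in upgrade() below corresponds roughly to the following SQL (an
+# illustrative sketch, not a statement taken from this patch):
+#
+#   UPDATE bw_usage_cache SET mac =
+#       (SELECT virtual_interfaces.address
+#          FROM virtual_interfaces, networks
+#         WHERE networks.label = bw_usage_cache.network_label
+#           AND networks.id = virtual_interfaces.network_id
+#           AND bw_usage_cache.instance_id = virtual_interfaces.instance_id);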
+ +from sqlalchemy import and_, select +from sqlalchemy import BigInteger, Boolean, Column, DateTime +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table + +from cinder import utils + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + vifs = Table('virtual_interfaces', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + + bw_usage_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', Integer(), nullable=False), + Column('network_label', + String(length=255, convert_unicode=False, + assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('start_period', DateTime(timezone=False), + nullable=False), + Column('last_refreshed', DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger()), + useexisting=True) + mac_column = Column('mac', String(255)) + bw_usage_cache.create_column(mac_column) + + bw_usage_cache.update()\ + .values(mac=select([vifs.c.address])\ + .where(and_( + networks.c.label == bw_usage_cache.c.network_label, + networks.c.id == vifs.c.network_id, + bw_usage_cache.c.instance_id == vifs.c.instance_id))\ + .as_scalar()).execute() + + bw_usage_cache.c.network_label.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + vifs = Table('virtual_interfaces', meta, autoload=True) + network = Table('networks', meta, autoload=True) + + bw_usage_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', Integer(), nullable=False), + Column('mac', String(255)), + Column('start_period', DateTime(timezone=False), + nullable=False), + Column('last_refreshed', DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger()), + useexisting=True) + + network_label_column = Column('network_label', String(255)) + bw_usage_cache.create_column(network_label_column) + + bw_usage_cache.update()\ + .values(network_label=select([network.c.label])\ + .where(and_( + network.c.id == vifs.c.network_id, + vifs.c.address == bw_usage_cache.c.mac, + bw_usage_cache.c.instance_id == vifs.c.instance_id))\ + .as_scalar()).execute() + + bw_usage_cache.c.mac.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py b/cinder/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py new file mode 100644 index 00000000000..971bfbecc2e --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/076_remove_unique_constraints.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) OpenStack, LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table
+from migrate.changeset.constraint import UniqueConstraint
+
+
+def _get_constraint_names(engine_name):
+
+    # NOTE(vish): These constraint names may be dependent on the backend, but
+    #             there doesn't seem to be a way to determine the proper
+    #             name for existing constraints. These names are correct for
+    #             mysql and postgres.
+    if engine_name == "mysql":
+        return {
+            "instance_types_name": ("name", "instance_types_name_key"),
+            "instance_types_flavorid": "instance_types_flavorid_str_key",
+            "volume_types_name": "name",
+        }
+    else:
+        return {
+            "instance_types_name": ("instance_types_name_key",),
+            "instance_types_flavorid": "instance_types_flavorid_str_key",
+            "volume_types_name": "volume_types_name_key",
+        }
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    c_names = _get_constraint_names(migrate_engine.name)
+
+    table = Table('instance_types', meta, autoload=True)
+    for constraint_name in c_names['instance_types_name']:
+        cons = UniqueConstraint('name',
+                                name=constraint_name,
+                                table=table)
+        cons.drop()
+    cons = UniqueConstraint('flavorid',
+                            name=c_names['instance_types_flavorid'],
+                            table=table)
+    cons.drop()
+    table = Table('volume_types', meta, autoload=True)
+    cons = UniqueConstraint('name',
+                            name=c_names['volume_types_name'],
+                            table=table)
+    cons.drop()
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    c_names = _get_constraint_names(migrate_engine.name)
+
+    table = Table('instance_types', meta, autoload=True)
+    for constraint_name in c_names['instance_types_name']:
+        cons = UniqueConstraint('name',
+                                name=constraint_name,
+                                table=table)
+        cons.create()
+    table = Table('instance_types', meta, autoload=True)
+    cons = UniqueConstraint('flavorid',
+                            name=c_names['instance_types_flavorid'],
+                            table=table)
+    cons.create()
+    table = Table('volume_types', meta, autoload=True)
+    cons = UniqueConstraint('name',
+                            name=c_names['volume_types_name'],
+                            table=table)
+    cons.create()
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/076_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/076_sqlite_upgrade.sql
new file mode 100644
index 00000000000..6053c1ed74d
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/076_sqlite_upgrade.sql
@@ -0,0 +1,61 @@
+-- sqlalchemy-migrate is surprisingly broken when it comes to migrations
+-- for sqlite.
As a result, we have to do much of the work manually here + +BEGIN TRANSACTION; + CREATE TABLE instance_types_temp ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + memory_mb INTEGER NOT NULL, + vcpus INTEGER NOT NULL, + root_gb INTEGER NOT NULL, + ephemeral_gb INTEGER NOT NULL, + swap INTEGER NOT NULL, + rxtx_factor FLOAT, + vcpu_weight INTEGER, + flavorid VARCHAR(255), + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO instance_types_temp SELECT + created_at, + updated_at, + deleted_at, + deleted, + name, + id, + memory_mb, + vcpus, + root_gb, + ephemeral_gb, + swap, + rxtx_factor, + vcpu_weight, + flavorid + FROM instance_types; + DROP TABLE instance_types; + ALTER TABLE instance_types_temp RENAME TO instance_types; + CREATE TABLE volume_types_temp ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + name VARCHAR(255), + id INTEGER NOT NULL, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO volume_types_temp SELECT + created_at, + updated_at, + deleted_at, + deleted, + name, + id + FROM volume_types; + DROP TABLE volume_types; + ALTER TABLE volume_types_temp RENAME TO volume_types; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py b/cinder/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py new file mode 100644 index 00000000000..4c08e2f0dc6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/077_convert_to_utf8.py @@ -0,0 +1,61 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # NOTE (ironcamel): The only table we are not converting to utf8 here is + # dns_domains. This table has a primary key that is 512 characters wide. + # When the mysql engine attempts to convert it to utf8, it complains about + # not supporting key columns larger than 1000. 
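+#
+# The mysql branch below concatenates a single statement batch roughly of
+# this shape (illustrative only; the real table list and database name are
+# taken from the code underneath):
+#
+#   SET foreign_key_checks = 0;
+#   ALTER TABLE aggregates CONVERT TO CHARACTER SET utf8;
+#   -- ... one ALTER per table in the list ...
+#   SET foreign_key_checks = 1;
+#   ALTER DATABASE <dbname> DEFAULT CHARACTER SET utf8;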
+
+    if migrate_engine.name == "mysql":
+        tables = [
+            # tables that are FK parents, must be converted early
+            "aggregates", "console_pools", "instance_types", "instances",
+            "projects", "security_groups", "sm_backend_config", "sm_flavors",
+            "snapshots", "user_project_association", "users", "volume_types",
+            "volumes",
+            # those that are children and others later
+            "agent_builds", "aggregate_hosts", "aggregate_metadata",
+            "auth_tokens", "block_device_mapping", "bw_usage_cache",
+            "certificates", "compute_nodes", "consoles", "fixed_ips",
+            "floating_ips", "instance_actions", "instance_faults",
+            "instance_info_caches", "instance_metadata",
+            "instance_type_extra_specs", "iscsi_targets", "key_pairs",
+            "migrate_version", "migrations", "networks", "provider_fw_rules",
+            "quotas", "s3_images", "security_group_instance_association",
+            "security_group_rules", "services", "sm_volume",
+            "user_project_role_association", "user_role_association",
+            "virtual_interfaces", "virtual_storage_arrays", "volume_metadata",
+            "volume_type_extra_specs", "zones"]
+        sql = "SET foreign_key_checks = 0;"
+        for table in tables:
+            sql += "ALTER TABLE %s CONVERT TO CHARACTER SET utf8;" % table
+        sql += "SET foreign_key_checks = 1;"
+        sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" \
+                % migrate_engine.url.database
+        migrate_engine.execute(sql)
+
+
+def downgrade(migrate_engine):
+    # utf8 tables should be backwards compatible, so let's leave it alone
+    pass
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py b/cinder/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py
new file mode 100644
index 00000000000..e4043f84dec
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/078_add_rpc_info_to_zones.py
@@ -0,0 +1,46 @@
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
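+# NOTE: adds the RPC connection details for a zone, plus an is_parent flag.
+# Roughly equivalent DDL (an illustrative sketch; note that default=False on
+# is_parent below is a client-side SQLAlchemy default, not a server-side
+# DEFAULT clause):
+#
+#   ALTER TABLE zones ADD COLUMN is_parent BOOLEAN;
+#   ALTER TABLE zones ADD COLUMN rpc_host VARCHAR(255);
+#   ALTER TABLE zones ADD COLUMN rpc_port INTEGER;
+#   ALTER TABLE zones ADD COLUMN rpc_virtual_host VARCHAR(255);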
+ +from sqlalchemy import Boolean, Column +from sqlalchemy import Integer, MetaData, String +from sqlalchemy import Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + is_parent = Column('is_parent', Boolean(), default=False) + rpc_host = Column('rpc_host', String(255)) + rpc_port = Column('rpc_port', Integer()) + rpc_virtual_host = Column('rpc_virtual_host', String(255)) + + zones.create_column(is_parent) + zones.create_column(rpc_host) + zones.create_column(rpc_port) + zones.create_column(rpc_virtual_host) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + zones = Table('zones', meta, autoload=True) + + zones.drop_column('rpc_virtual_host') + zones.drop_column('rpc_port') + zones.drop_column('rpc_host') + zones.drop_column('is_parent') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql new file mode 100644 index 00000000000..80061af78b9 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/078_sqlite_downgrade.sql @@ -0,0 +1,35 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE zones_temp ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + name VARCHAR(255), + api_url VARCHAR(255), + username VARCHAR(255), + password VARCHAR(255), + weight_offset FLOAT, + weight_scale FLOAT, + PRIMARY KEY (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO zones_temp + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + name, + api_url, + username, + password, + weight_offset, + weight_scale FROM zones; + + DROP TABLE zones; + + ALTER TABLE zones_temp RENAME TO zones; +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py b/cinder/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py new file mode 100644 index 00000000000..2b22b94a020 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/079_add_zone_name_to_instances.py @@ -0,0 +1,30 @@ +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
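+# NOTE: a plain add-column migration. sqlalchemy-migrate monkey-patches
+# Table with create_column()/drop_column(), which is why upgrade() and
+# downgrade() below are exact mirrors of each other; migration 082 later
+# renames this column to cell_name.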
+ +from sqlalchemy import Column, MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + zone_name = Column('zone_name', String(255)) + instances.create_column(zone_name) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True) + instances.drop_column('zone_name') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/080_add_hypervisor_hostname_to_compute_nodes.py b/cinder/db/sqlalchemy/migrate_repo/versions/080_add_hypervisor_hostname_to_compute_nodes.py new file mode 100644 index 00000000000..28a3ce48f1c --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/080_add_hypervisor_hostname_to_compute_nodes.py @@ -0,0 +1,30 @@ +# Copyright 2012 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import Column, MetaData, String, Table + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + compute_nodes = Table("compute_nodes", meta, autoload=True) + hypervisor_hostname = Column("hypervisor_hostname", String(255)) + compute_nodes.create_column(hypervisor_hostname) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + compute_nodes = Table("compute_nodes", meta, autoload=True) + compute_nodes.drop_column('hypervisor_hostname') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py b/cinder/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py new file mode 100644 index 00000000000..c6687ac8074 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/081_drop_instance_id_bw_cache.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
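+# NOTE: with bw_usage_cache rows keyed by mac since migration 075, the
+# instance_id column is redundant and upgrade() drops it. The downgrade
+# rebuilds it by mapping mac -> instance through the JSON network_info
+# blobs in instance_info_caches, each of which is a list shaped roughly
+# like (illustrative sketch):
+#
+#   [{"address": "02:16:3e:xx:xx:xx", ...}, ...]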
+import json
+
+from sqlalchemy import Column, Table, MetaData, Integer, Boolean, String
+from sqlalchemy import DateTime, BigInteger
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    bw_usage_cache = Table('bw_usage_cache', meta,
+            Column('created_at', DateTime(timezone=False)),
+            Column('updated_at', DateTime(timezone=False)),
+            Column('deleted_at', DateTime(timezone=False)),
+            Column('deleted', Boolean(create_constraint=True, name=None)),
+            Column('id', Integer(), primary_key=True, nullable=False),
+            Column('instance_id', Integer(), nullable=False),
+            Column('mac', String(255)),
+            Column('start_period', DateTime(timezone=False),
+                   nullable=False),
+            Column('last_refreshed', DateTime(timezone=False)),
+            Column('bw_in', BigInteger()),
+            Column('bw_out', BigInteger()),
+            useexisting=True)
+
+    bw_usage_cache.drop_column('instance_id')
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    instance_info_caches = Table('instance_info_caches', meta, autoload=True)
+    bw_usage_cache = Table('bw_usage_cache', meta, autoload=True)
+
+    instance_id_column = Column('instance_id', Integer)
+    bw_usage_cache.create_column(instance_id_column)
+
+    # build a mac -> instance_id map from the cached network_info blobs
+    cache = {}
+    for row in migrate_engine.execute(instance_info_caches.select()):
+        instance_id = row['instance_id']
+        if not row['network_info']:
+            continue
+
+        nw_info = json.loads(row['network_info'])
+        for vif in nw_info:
+            cache[vif['address']] = instance_id
+
+    for row in migrate_engine.execute(bw_usage_cache.select()):
+        instance_id = cache[row['mac']]
+        migrate_engine.execute(bw_usage_cache.update()\
+            .where(bw_usage_cache.c.id == row['id'])\
+            .values(instance_id=instance_id))
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/082_zone_to_cell.py b/cinder/db/sqlalchemy/migrate_repo/versions/082_zone_to_cell.py
new file mode 100644
index 00000000000..79e99503af0
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/082_zone_to_cell.py
@@ -0,0 +1,35 @@
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instances = Table('instances', meta, autoload=True)
+    zone_name = instances.c.zone_name
+    zone_name.alter(name='cell_name')
+    zones = Table('zones', meta, autoload=True)
+    zones.rename('cells')
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    instances = Table('instances', meta, autoload=True)
+    cell_name = instances.c.cell_name
+    cell_name.alter(name='zone_name')
+    cells = Table('cells', meta, autoload=True)
+    cells.rename('zones')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py b/cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py
new file mode 100644
index 00000000000..3869c6ab57b
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py
@@ -0,0 +1,61 @@
+# Copyright 2012 OpenStack LLC.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime +from sqlalchemy import MetaData, Integer, String, Table + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # New table + quota_classes = Table('quota_classes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True), + Column('class_name', + String(length=255, convert_unicode=True, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), index=True), + Column('resource', + String(length=255, convert_unicode=True, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)), + Column('hard_limit', Integer(), nullable=True), + ) + + try: + quota_classes.create() + except Exception: + LOG.error(_("Table |%s| not created!"), repr(quota_classes)) + raise + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + quota_classes = Table('quota_classes', meta, autoload=True) + try: + quota_classes.drop() + except Exception: + LOG.error(_("quota_classes table not dropped")) + raise diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py b/cinder/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py new file mode 100644 index 00000000000..bcbc2db9013 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/084_quotas_unlimited.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 Red Hat, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
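+# NOTE: an "unlimited" quota used to be stored as a NULL hard_limit; from
+# this migration on it is stored as -1. SQLAlchemy renders the
+# `hard_limit == None` comparison used below as IS NULL, so the upgrade is
+# equivalent to (illustrative sketch):
+#
+#   UPDATE quotas SET hard_limit = -1 WHERE hard_limit IS NULL;
+#   UPDATE quota_classes SET hard_limit = -1 WHERE hard_limit IS NULL;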
+
+import migrate
+import sqlalchemy
+
+
+def upgrade(migrate_engine):
+    """Map quotas hard_limit from NULL to -1"""
+    _migrate_unlimited(migrate_engine, None, -1)
+
+
+def downgrade(migrate_engine):
+    """Map quotas hard_limit from -1 to NULL"""
+    _migrate_unlimited(migrate_engine, -1, None)
+
+
+def _migrate_unlimited(migrate_engine, old_limit, new_limit):
+    meta = sqlalchemy.MetaData()
+    meta.bind = migrate_engine
+
+    def _migrate(table_name):
+        table = sqlalchemy.Table(table_name, meta, autoload=True)
+        table.update().\
+            where(table.c.hard_limit == old_limit).\
+            values(hard_limit=new_limit).execute()
+
+    _migrate('quotas')
+    _migrate('quota_classes')
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py b/cinder/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py
new file mode 100644
index 00000000000..8c4f0d5c307
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/085_add_index_to_fixed_ips_by_address.py
@@ -0,0 +1,31 @@
+# Copyright 2012 IBM
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Index, MetaData, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    fixed_ips = Table('fixed_ips', meta, autoload=True)
+    index = Index('address', fixed_ips.c.address)
+    index.create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+    fixed_ips = Table('fixed_ips', meta, autoload=True)
+    index = Index('address', fixed_ips.c.address)
+    index.drop(migrate_engine)
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py b/cinder/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py
new file mode 100644
index 00000000000..da985b95613
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/086_set_engine_mysql_innodb.py
@@ -0,0 +1,44 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here.
Don't create your own engine; + # bind migrate_engine to your metadata + + tables = ["agent_builds", "aggregate_hosts", "aggregate_metadata", + "aggregates", "block_device_mapping", "bw_usage_cache", + "dns_domains", "instance_faults", "instance_type_extra_specs", + "provider_fw_rules", "quota_classes", "s3_images", + "sm_backend_config", "sm_flavors", "sm_volume", + "virtual_storage_arrays", "volume_metadata", + "volume_type_extra_specs", "volume_types"] + + meta = MetaData() + meta.bind = migrate_engine + if migrate_engine.name == "mysql": + d = migrate_engine.execute("SHOW TABLE STATUS WHERE Engine!='InnoDB';") + for row in d.fetchall(): + table_name = row[0] + if table_name in tables: + migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % + table_name) + + +def downgrade(migrate_engine): + pass diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py b/cinder/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py new file mode 100644 index 00000000000..e66004b12b6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/087_add_uuid_to_bw_usage_cache.py @@ -0,0 +1,56 @@ +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, BigInteger +from sqlalchemy import MetaData, Integer, String, Table + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # add column: + bw_usage_cache = Table('bw_usage_cache', meta, autoload=True) + uuid = Column('uuid', String(36)) + + # clear the cache to get rid of entries with no uuid + migrate_engine.execute(bw_usage_cache.delete()) + + bw_usage_cache.create_column(uuid) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + # drop column: + bw_usage_cache = Table('bw_usage_cache', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('mac', String(255)), + Column('uuid', String(36)), + Column('start_period', DateTime(timezone=False), nullable=False), + Column('last_refreshed', DateTime(timezone=False)), + Column('bw_in', BigInteger()), + Column('bw_out', BigInteger()), + useexisting=True) + + bw_usage_cache.drop_column('uuid') diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py b/cinder/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py new file mode 100644 index 00000000000..4962b2b054f --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. 
+# Copyright 2012 Michael Still and Canonical Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import select, Column, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from migrate import ForeignKeyConstraint + +from cinder import log as logging + + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + uuid_column = Column('instance_uuid', String(36)) + uuid_column.create(block_device_mapping) + + try: + block_device_mapping.update().values( + instance_uuid=select( + [instances.c.uuid], + instances.c.id == block_device_mapping.c.instance_id) + ).execute() + except Exception: + uuid_column.drop() + raise + + fkeys = list(block_device_mapping.c.instance_id.foreign_keys) + if fkeys: + try: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint( + columns=[block_device_mapping.c.instance_id], + refcolumns=[instances.c.id], + name=fkey_name).drop() + except Exception: + LOG.error(_("foreign key constraint couldn't be removed")) + raise + + block_device_mapping.c.instance_id.drop() + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + instances = Table('instances', meta, autoload=True) + id_column = Column('instance_id', Integer, ForeignKey('instances.id')) + id_column.create(block_device_mapping) + + try: + block_device_mapping.update().values( + instance_id=select( + [instances.c.id], + instances.c.uuid == block_device_mapping.c.instance_uuid) + ).execute() + except Exception: + id_column.drop() + raise + + block_device_mapping.c.instance_uuid.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql new file mode 100644 index 00000000000..3699ce9abec --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_downgrade.sql @@ -0,0 +1,97 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + instance_uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping_backup + SELECT created_at, + updated_at, + deleted_at, + 
deleted, + id, + NULL, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info, + instance_uuid + FROM block_device_mapping; + + UPDATE block_device_mapping_backup + SET instance_id= + (SELECT id + FROM instances + WHERE block_device_mapping_backup.instance_uuid = instances.uuid + ); + + DROP TABLE block_device_mapping; + + CREATE TABLE block_device_mapping ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + instance_id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info + FROM block_device_mapping_backup; + + DROP TABLE block_device_mapping_backup; + +COMMIT; \ No newline at end of file diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql new file mode 100644 index 00000000000..d75d2ffa216 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/088_sqlite_upgrade.sql @@ -0,0 +1,97 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + instance_uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_id) REFERENCES instances (id) + ); + + INSERT INTO block_device_mapping_backup + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + instance_id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info, + NULL + FROM block_device_mapping; + + UPDATE block_device_mapping_backup + SET instance_uuid= + (SELECT uuid + FROM instances + WHERE block_device_mapping_backup.instance_id = instances.id + ); + + DROP TABLE block_device_mapping; + + CREATE TABLE block_device_mapping ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + device_name VARCHAR(255) NOT NULL, + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info TEXT, + instance_uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots (id), + CHECK (deleted IN (0, 1)), + CHECK (delete_on_termination IN (0, 1)), + CHECK (no_device IN (0, 1)), + FOREIGN 
KEY(volume_id) REFERENCES volumes (id), + FOREIGN KEY(instance_uuid) REFERENCES instances (uuid) + ); + + INSERT INTO block_device_mapping + SELECT created_at, + updated_at, + deleted_at, + deleted, + id, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info, + instance_uuid + FROM block_device_mapping_backup; + + DROP TABLE block_device_mapping_backup; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py b/cinder/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py new file mode 100644 index 00000000000..11bc25b0158 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/089_add_volume_id_mappings.py @@ -0,0 +1,116 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table +from cinder import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Build mapping tables for our volume uuid migration. + + These mapping tables serve two purposes: + 1. Provide a method for downgrade after UUID conversion + 2. 
Provide a uuid to associate with existing volumes and snapshots + when we do the actual datatype migration from int to uuid + + """ + meta = MetaData() + meta.bind = migrate_engine + + volume_id_mappings = Table('volume_id_mappings', meta, + Column('created_at', + DateTime(timezone=False)), + Column('updated_at', + DateTime(timezone=False)), + Column('deleted_at', + DateTime(timezone=False)), + Column('deleted', + Boolean(create_constraint=True, name=None)), + Column('id', Integer(), + primary_key=True, + nullable=False, + autoincrement=True), + Column('uuid', String(36), + nullable=False)) + try: + volume_id_mappings.create() + except Exception: + LOG.exception("Exception while creating table 'volume_id_mappings'") + meta.drop_all(tables=[volume_id_mappings]) + raise + + snapshot_id_mappings = Table('snapshot_id_mappings', meta, + Column('created_at', + DateTime(timezone=False)), + Column('updated_at', + DateTime(timezone=False)), + Column('deleted_at', + DateTime(timezone=False)), + Column('deleted', + Boolean(create_constraint=True, name=None)), + Column('id', Integer(), + primary_key=True, + nullable=False, + autoincrement=True), + Column('uuid', String(36), + nullable=False)) + try: + snapshot_id_mappings.create() + except Exception: + LOG.exception("Exception while creating table 'snapshot_id_mappings'") + meta.drop_all(tables=[snapshot_id_mappings]) + raise + + if migrate_engine.name == "mysql": + migrate_engine.execute("ALTER TABLE volume_id_mappings Engine=InnoDB") + migrate_engine.execute("ALTER TABLE snapshot_id_mappings "\ + "Engine=InnoDB") + + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + volume_id_mappings = Table('volume_id_mappings', meta, autoload=True) + snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True) + + volume_list = list(volumes.select().execute()) + for v in volume_list: + old_id = v['id'] + new_id = utils.gen_uuid() + row = volume_id_mappings.insert() + row.execute({'id': old_id, + 'uuid': str(new_id)}) + + snapshot_list = list(snapshots.select().execute()) + for s in snapshot_list: + old_id = s['id'] + new_id = utils.gen_uuid() + row = snapshot_id_mappings.insert() + row.execute({'id': old_id, + 'uuid': str(new_id)}) + + +def downgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + volume_id_mappings = Table('volume_id_mappings', meta, autoload=True) + volume_id_mappings.drop() + + snapshot_id_mappings = Table('snapshot_id_mappings', meta, autoload=True) + snapshot_id_mappings.drop() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py b/cinder/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py new file mode 100644 index 00000000000..7887cd88e8b --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py @@ -0,0 +1,239 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
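+# NOTE: step two of the three-part volume/snapshot uuid conversion:
+# migration 089 built the id<->uuid mapping tables, this one widens the id
+# columns (and every column referencing them) from INTEGER to VARCHAR(36),
+# and 091 rewrites the stored values. The foreign keys are dropped before
+# the alters and recreated afterwards because backends such as mysql
+# refuse to alter a column that is the target of a constraint.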
+ +from sqlalchemy import Integer +from sqlalchemy import MetaData, String, Table +from migrate import ForeignKeyConstraint +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Convert volume and snapshot id columns from int to varchar.""" + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + + if dialect.startswith('sqlite'): + return + + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + iscsi_targets = Table('iscsi_targets', meta, autoload=True) + volume_metadata = Table('volume_metadata', meta, autoload=True) + sm_volume = Table('sm_volume', meta, autoload=True) + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + + try: + fkeys = list(snapshots.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(iscsi_targets.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(volume_metadata.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[volume_metadata.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(sm_volume.c.id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[sm_volume.c.id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(block_device_mapping.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[block_device_mapping.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).drop() + + fkeys = list(block_device_mapping.c.snapshot_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[block_device_mapping.c.snapshot_id], + refcolumns=[snapshots.c.id], + name=fkey_name).drop() + + except Exception: + LOG.error(_("Foreign Key constraint couldn't be removed")) + raise + + volumes.c.id.alter(String(36), primary_key=True) + volumes.c.snapshot_id.alter(String(36)) + volume_metadata.c.volume_id.alter(String(36), nullable=False) + snapshots.c.id.alter(String(36), primary_key=True) + snapshots.c.volume_id.alter(String(36)) + sm_volume.c.id.alter(String(36)) + block_device_mapping.c.volume_id.alter(String(36)) + block_device_mapping.c.snapshot_id.alter(String(36)) + iscsi_targets.c.volume_id.alter(String(36), nullable=True) + + try: + fkeys = list(snapshots.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[snapshots.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + fkeys = list(iscsi_targets.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + fkeys = list(volume_metadata.c.volume_id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[volume_metadata.c.volume_id], + refcolumns=[volumes.c.id], + name=fkey_name).create() + + fkeys = list(sm_volume.c.id.foreign_keys) + if fkeys: + fkey_name = fkeys[0].constraint.name + ForeignKeyConstraint(columns=[sm_volume.c.id], + refcolumns=[volumes.c.id], + 
name=fkey_name).create()
+        # NOTE(jdg) We're intentionally leaving off FK's on BDM
+
+    except Exception:
+        LOG.error(_("Foreign Key constraint couldn't be created"))
+        raise
+
+
+def downgrade(migrate_engine):
+    """Convert volume and snapshot id columns back to int."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+    dialect = migrate_engine.url.get_dialect().name
+
+    if dialect.startswith('sqlite'):
+        return
+
+    volumes = Table('volumes', meta, autoload=True)
+    snapshots = Table('snapshots', meta, autoload=True)
+    iscsi_targets = Table('iscsi_targets', meta, autoload=True)
+    volume_metadata = Table('volume_metadata', meta, autoload=True)
+    sm_volume = Table('sm_volume', meta, autoload=True)
+    block_device_mapping = Table('block_device_mapping', meta, autoload=True)
+
+    try:
+        fkeys = list(snapshots.c.volume_id.foreign_keys)
+        if fkeys:
+            fkey_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(columns=[snapshots.c.volume_id],
+                                 refcolumns=[volumes.c.id],
+                                 name=fkey_name).drop()
+
+        fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
+        if fkeys:
+            fkey_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
+                                 refcolumns=[volumes.c.id],
+                                 name=fkey_name).drop()
+
+        fkeys = list(volume_metadata.c.volume_id.foreign_keys)
+        if fkeys:
+            fkey_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
+                                 refcolumns=[volumes.c.id],
+                                 name=fkey_name).drop()
+
+        fkeys = list(sm_volume.c.id.foreign_keys)
+        if fkeys:
+            fkey_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(columns=[sm_volume.c.id],
+                                 refcolumns=[volumes.c.id],
+                                 name=fkey_name).drop()
+
+    except Exception:
+        LOG.error(_("Foreign Key constraint couldn't be removed"))
+        raise
+
+    volumes.c.id.alter(Integer, primary_key=True, autoincrement=True)
+    volumes.c.snapshot_id.alter(Integer)
+    volume_metadata.c.volume_id.alter(Integer, nullable=False)
+    snapshots.c.id.alter(Integer, primary_key=True, autoincrement=True)
+    snapshots.c.volume_id.alter(Integer)
+    sm_volume.c.id.alter(Integer)
+    block_device_mapping.c.volume_id.alter(Integer)
+    block_device_mapping.c.snapshot_id.alter(Integer)
+    iscsi_targets.c.volume_id.alter(Integer, nullable=True)
+
+    try:
+        fkeys = list(snapshots.c.volume_id.foreign_keys)
+        if fkeys:
+            fkey_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(columns=[snapshots.c.volume_id],
+                                 refcolumns=[volumes.c.id],
+                                 name=fkey_name).create()
+
+        fkeys = list(iscsi_targets.c.volume_id.foreign_keys)
+        if fkeys:
+            fkey_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(columns=[iscsi_targets.c.volume_id],
+                                 refcolumns=[volumes.c.id],
+                                 name=fkey_name).create()
+
+        fkeys = list(volume_metadata.c.volume_id.foreign_keys)
+        if fkeys:
+            fkey_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(columns=[volume_metadata.c.volume_id],
+                                 refcolumns=[volumes.c.id],
+                                 name=fkey_name).create()
+
+        fkeys = list(sm_volume.c.id.foreign_keys)
+        if fkeys:
+            fkey_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(columns=[sm_volume.c.id],
+                                 refcolumns=[volumes.c.id],
+                                 name=fkey_name).create()
+
+        # NOTE(jdg) Put the BDM foreign keys back in place
+        fkeys = list(block_device_mapping.c.volume_id.foreign_keys)
+        if fkeys:
+            fkey_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(columns=[block_device_mapping.c.volume_id],
+                                 refcolumns=[volumes.c.id],
+                                 name=fkey_name).create()
+
+        fkeys = list(block_device_mapping.c.snapshot_id.foreign_keys)
+        if fkeys:
+            fkey_name = fkeys[0].constraint.name
+            ForeignKeyConstraint(columns=[block_device_mapping.c.snapshot_id],
+                                 refcolumns=[snapshots.c.id],
+                                 name=fkey_name).create()
+
+    except Exception:
+        LOG.error(_("Foreign Key constraint couldn't be created"))
+        raise
diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql
new file mode 100644
index 00000000000..7d89da247b3
--- /dev/null
+++ b/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_downgrade.sql
@@ -0,0 +1,226 @@
+BEGIN TRANSACTION;
+
+    -- change id and snapshot_id datatypes in volumes table
+    CREATE TABLE volumes_backup(
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        ec2_id INTEGER,
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        snapshot_id VARCHAR(255),
+        host VARCHAR(255),
+        size INTEGER,
+        availability_zone VARCHAR(255),
+        instance_id INTEGER,
+        mountpoint VARCHAR(255),
+        attach_time VARCHAR(255),
+        status VARCHAR(255),
+        attach_status VARCHAR(255),
+        scheduled_at DATETIME,
+        launched_at DATETIME,
+        terminated_at DATETIME,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        provider_location VARCHAR(255),
+        provider_auth VARCHAR(255),
+        volume_type_id INTEGER,
+        PRIMARY KEY (id),
+        FOREIGN KEY(instance_id) REFERENCES instances (id),
+        UNIQUE (id),
+        CHECK (deleted IN (0, 1))
+    );
+
+    INSERT INTO volumes_backup SELECT
+        created_at,
+        updated_at,
+        deleted_at,
+        deleted,
+        id,
+        ec2_id,
+        user_id,
+        project_id,
+        snapshot_id,
+        host,
+        size,
+        availability_zone,
+        instance_id,
+        mountpoint,
+        attach_time,
+        status,
+        attach_status,
+        scheduled_at,
+        launched_at,
+        terminated_at,
+        display_name,
+        display_description,
+        provider_location,
+        provider_auth,
+        volume_type_id
+    FROM volumes;
+    DROP TABLE volumes;
+    ALTER TABLE volumes_backup RENAME TO volumes;
+
+    -- change id and volume_id datatypes in snapshots table
+    CREATE TABLE snapshots_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        user_id VARCHAR(255),
+        project_id VARCHAR(255),
+        volume_id INTEGER,
+        status VARCHAR(255),
+        progress VARCHAR(255),
+        volume_size INTEGER,
+        display_name VARCHAR(255),
+        display_description VARCHAR(255),
+        PRIMARY KEY (id),
+        UNIQUE (id),
+        CHECK (deleted IN (0, 1))
+    );
+    INSERT INTO snapshots_backup SELECT
+        created_at,
+        updated_at,
+        deleted_at,
+        deleted,
+        id,
+        user_id,
+        project_id,
+        volume_id,
+        status,
+        progress,
+        volume_size,
+        display_name,
+        display_description
+    FROM snapshots;
+    DROP TABLE snapshots;
+    ALTER TABLE snapshots_backup RENAME TO snapshots;
+
+    -- change id and volume_id datatypes in iscsi_targets table
+    CREATE TABLE iscsi_targets_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        target_num INTEGER,
+        host VARCHAR(255),
+        volume_id INTEGER,
+        PRIMARY KEY (id),
+        FOREIGN KEY(volume_id) REFERENCES volumes(id),
+        UNIQUE (id),
+        CHECK (deleted IN (0, 1))
+    );
+    INSERT INTO iscsi_targets_backup SELECT
+        created_at,
+        updated_at,
+        deleted_at,
+        deleted,
+        id,
+        target_num,
+        host,
+        volume_id
+    FROM iscsi_targets;
+    DROP TABLE iscsi_targets;
+    ALTER TABLE iscsi_targets_backup RENAME TO iscsi_targets;
+
+    CREATE TABLE volume_metadata_backup (
+        created_at DATETIME,
+        updated_at DATETIME,
+        deleted_at DATETIME,
+        deleted BOOLEAN,
+        id INTEGER NOT NULL,
+        key VARCHAR(255),
+        value VARCHAR(255),
+        volume_id INTEGER,
+        PRIMARY KEY (id),
+        FOREIGN
KEY(volume_id) REFERENCES volumes(id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO volume_metadata_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + key, + value, + volume_id + FROM volume_metadata; + DROP TABLE volume_metadata; + ALTER TABLE volume_metadata_backup RENAME TO volume_metadata; + + -- change volume_id and snapshot_id datatypes in bdm table + CREATE TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_uuid VARCHAR(36) NOT NULL, + device_name VARCHAR(255), + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id INTEGER, + volume_id INTEGER, + volume_size INTEGER, + no_device BOOLEAN, + connection_info VARCHAR(255), + FOREIGN KEY(instance_uuid) REFERENCES instances(id), + FOREIGN KEY(volume_id) REFERENCES volumes(id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots(id), + PRIMARY KEY (id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO block_device_mapping_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + instance_uuid, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info + FROM block_device_mapping; + DROP TABLE block_device_mapping; + ALTER TABLE block_device_mapping_backup RENAME TO block_device_mapping; + + -- change volume_id and sm_volume_table + CREATE TABLE sm_volume_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + backend_id INTEGER NOT NULL, + vdi_uuid VARCHAR(255), + PRIMARY KEY (id), + FOREIGN KEY(id) REFERENCES volumes(id), + UNIQUE (id), + CHECK (deleted IN (0,1)) + ); + INSERT INTO sm_volume_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + backend_id, + vdi_uuid + FROM sm_volume; + DROP TABLE sm_volume; + ALTER TABLE sm_volume_backup RENAME TO sm_volume; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql b/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql new file mode 100644 index 00000000000..53fbc69f6e6 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/090_sqlite_upgrade.sql @@ -0,0 +1,226 @@ +BEGIN TRANSACTION; + + -- change id and snapshot_id datatypes in volumes table + CREATE TABLE volumes_backup( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + ec2_id INTEGER, + user_id VARCHAR(255), + project_id VARCHAR(255), + snapshot_id VARCHAR(36), + host VARCHAR(255), + size INTEGER, + availability_zone VARCHAR(255), + instance_id INTEGER, + mountpoint VARCHAR(255), + attach_time VARCHAR(255), + status VARCHAR(255), + attach_status VARCHAR(255), + scheduled_at DATETIME, + launched_at DATETIME, + terminated_at DATETIME, + display_name VARCHAR(255), + display_description VARCHAR(255), + provider_location VARCHAR(255), + provider_auth VARCHAR(255), + volume_type_id INTEGER, + PRIMARY KEY (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO volumes_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + ec2_id, + user_id, + project_id, + snapshot_id, + host, + size, + availability_zone, + instance_id, + mountpoint, + attach_time, + status, + attach_status, + scheduled_at, + launched_at, + terminated_at, + display_name, + display_description, + provider_location, + 
provider_auth, + volume_type_id + FROM volumes; + DROP TABLE volumes; + ALTER TABLE volumes_backup RENAME TO volumes; + + -- change id and volume_id datatypes in snapshots table + CREATE TABLE snapshots_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + user_id VARCHAR(255), + project_id VARCHAR(255), + volume_id VARCHAR(36), + status VARCHAR(255), + progress VARCHAR(255), + volume_size INTEGER, + display_name VARCHAR(255), + display_description VARCHAR(255), + PRIMARY KEY (id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO snapshots_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + user_id, + project_id, + volume_id, + status, + progress, + volume_size, + display_name, + display_description + FROM snapshots; + DROP TABLE snapshots; + ALTER TABLE snapshots_backup RENAME TO snapshots; + + -- change id and volume_id datatypes in iscsi_targets table + CREATE TABLE iscsi_targets_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + target_num INTEGER, + host VARCHAR(255), + volume_id VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(volume_id) REFERENCES volumes(id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO iscsi_targets_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + target_num, + host, + volume_id + FROM iscsi_targets; + DROP TABLE iscsi_targets; + ALTER TABLE iscsi_targets_backup RENAME TO iscsi_targets; + + CREATE TABLE volume_metadata_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + key VARCHAR(255), + value VARCHAR(255), + volume_id VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(volume_id) REFERENCES volumes(id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO volume_metadata_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + key, + value, + volume_id + FROM volume_metadata; + DROP TABLE volume_metadata; + ALTER TABLE volume_metadata_backup RENAME TO volume_metadata; + + -- change volume_id and snapshot_id datatypes in bdm table + CREATE TABLE block_device_mapping_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + instance_uuid VARCHAR(36) NOT NULL, + device_name VARCHAR(255), + delete_on_termination BOOLEAN, + virtual_name VARCHAR(255), + snapshot_id VARCHAR(36), + volume_id VARCHAR(36), + volume_size INTEGER, + no_device BOOLEAN, + connection_info VARCHAR(255), + FOREIGN KEY(instance_uuid) REFERENCES instances(id), + FOREIGN KEY(volume_id) REFERENCES volumes(id), + FOREIGN KEY(snapshot_id) REFERENCES snapshots(id), + PRIMARY KEY (id), + UNIQUE (id), + CHECK (deleted IN (0, 1)) + ); + INSERT INTO block_device_mapping_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + instance_uuid, + device_name, + delete_on_termination, + virtual_name, + snapshot_id, + volume_id, + volume_size, + no_device, + connection_info + FROM block_device_mapping; + DROP TABLE block_device_mapping; + ALTER TABLE block_device_mapping_backup RENAME TO block_device_mapping; + + -- change id datatype in sm_volume table + CREATE TABLE sm_volume_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id VARCHAR(36) NOT NULL, + backend_id INTEGER NOT NULL, + vdi_uuid VARCHAR(255), + PRIMARY KEY (id), + FOREIGN KEY(id) REFERENCES volumes(id), + UNIQUE 
(id), + CHECK (deleted IN (0,1)) + ); + INSERT INTO sm_volume_backup SELECT + created_at, + updated_at, + deleted_at, + deleted, + id, + backend_id, + vdi_uuid + FROM sm_volume; + DROP TABLE sm_volume; + ALTER TABLE sm_volume_backup RENAME TO sm_volume; + +COMMIT; diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py b/cinder/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py new file mode 100644 index 00000000000..b9ec5c83e05 --- /dev/null +++ b/cinder/db/sqlalchemy/migrate_repo/versions/091_convert_volume_ids_to_uuid.py @@ -0,0 +1,145 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, select, Table +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +def upgrade(migrate_engine): + """Convert volume and snapshot id columns from int to varchar.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + snapshots = Table('snapshots', meta, autoload=True) + iscsi_targets = Table('iscsi_targets', meta, autoload=True) + volume_metadata = Table('volume_metadata', meta, autoload=True) + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + sm_volumes = Table('sm_volume', meta, autoload=True) + + volume_mappings = Table('volume_id_mappings', meta, autoload=True) + snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True) + + volume_list = list(volumes.select().execute()) + for v in volume_list: + new_id = select([volume_mappings.c.uuid], + volume_mappings.c.id == v['id']) + + volumes.update().\ + where(volumes.c.id == v['id']).\ + values(id=new_id).execute() + + sm_volumes.update().\ + where(sm_volumes.c.id == v['id']).\ + values(id=new_id).execute() + + snapshots.update().\ + where(snapshots.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + iscsi_targets.update().\ + where(iscsi_targets.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + volume_metadata.update().\ + where(volume_metadata.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + block_device_mapping.update().\ + where(block_device_mapping.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + snapshot_list = list(snapshots.select().execute()) + for s in snapshot_list: + new_id = select([snapshot_mappings.c.uuid], + snapshot_mappings.c.id == s['id']) + + volumes.update().\ + where(volumes.c.snapshot_id == s['id']).\ + values(snapshot_id=new_id).execute() + + snapshots.update().\ + where(snapshots.c.id == s['id']).\ + values(id=new_id).execute() + + block_device_mapping.update().\ + where(block_device_mapping.c.snapshot_id == s['id']).\ + values(snapshot_id=new_id).execute() + + +def downgrade(migrate_engine): + """Convert volume and snapshot id columns back to int.""" + meta = MetaData() + meta.bind = migrate_engine + + volumes = Table('volumes', meta, autoload=True) + snapshots = 
Table('snapshots', meta, autoload=True) + iscsi_targets = Table('iscsi_targets', meta, autoload=True) + volume_metadata = Table('volume_metadata', meta, autoload=True) + block_device_mapping = Table('block_device_mapping', meta, autoload=True) + sm_volumes = Table('sm_volume', meta, autoload=True) + + volume_mappings = Table('volume_id_mappings', meta, autoload=True) + snapshot_mappings = Table('snapshot_id_mappings', meta, autoload=True) + + volume_list = list(volumes.select().execute()) + for v in volume_list: + new_id = select([volume_mappings.c.id], + volume_mappings.c.uuid == v['id']) + + volumes.update().\ + where(volumes.c.id == v['id']).\ + values(id=new_id).execute() + + sm_volumes.update().\ + where(sm_volumes.c.id == v['id']).\ + values(id=new_id).execute() + + snapshots.update().\ + where(snapshots.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + iscsi_targets.update().\ + where(iscsi_targets.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + volume_metadata.update().\ + where(volume_metadata.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + block_device_mapping.update().\ + where(block_device_mapping.c.volume_id == v['id']).\ + values(volume_id=new_id).execute() + + snapshot_list = list(snapshots.select().execute()) + for s in snapshot_list: + new_id = select([snapshot_mappings.c.id], + snapshot_mappings.c.uuid == s['id']) + + volumes.update().\ + where(volumes.c.snapshot_id == s['id']).\ + values(snapshot_id=new_id).execute() + + snapshots.update().\ + where(snapshots.c.id == s['id']).\ + values(id=new_id).execute() + + block_device_mapping.update().\ + where(block_device_mapping.c.snapshot_id == s['id']).\ + values(snapshot_id=new_id).execute() diff --git a/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py b/cinder/db/sqlalchemy/migrate_repo/versions/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/db/sqlalchemy/migration.py b/cinder/db/sqlalchemy/migration.py new file mode 100644 index 00000000000..153be1a1f49 --- /dev/null +++ b/cinder/db/sqlalchemy/migration.py @@ -0,0 +1,129 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
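+
+# NOTE: the 091 migration above leans on a correlated sub-select as the
+# UPDATE value when swapping integer ids for uuids via the
+# volume_id_mappings / snapshot_id_mappings tables; a minimal sketch of
+# that pattern (names as used there):
+#
+#     new_id = select([volume_mappings.c.uuid],
+#                     volume_mappings.c.id == v['id'])
+#     volumes.update().where(volumes.c.id == v['id']).\
+#         values(id=new_id).execute()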
+ +import distutils.version as dist_version +import os +import sys + +from cinder.db.sqlalchemy.session import get_engine +from cinder import exception +from cinder import flags +from cinder import log as logging + + +import sqlalchemy +import migrate +from migrate.versioning import util as migrate_util + + +LOG = logging.getLogger(__name__) + + +@migrate_util.decorator +def patched_with_engine(f, *a, **kw): + url = a[0] + engine = migrate_util.construct_engine(url, **kw) + + try: + kw['engine'] = engine + return f(*a, **kw) + finally: + if isinstance(engine, migrate_util.Engine) and engine is not url: + migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine) + engine.dispose() + + +# TODO(jkoelker) When migrate 0.7.3 is released and cinder depends +# on that version or higher, this can be removed +MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3') +if (not hasattr(migrate, '__version__') or + dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION): + migrate_util.with_engine = patched_with_engine + + +# NOTE(jkoelker) Delay importing migrate until we are patched +from migrate import exceptions as versioning_exceptions +from migrate.versioning import api as versioning_api +from migrate.versioning.repository import Repository + +FLAGS = flags.FLAGS + +_REPOSITORY = None + + +def db_sync(version=None): + if version is not None: + try: + version = int(version) + except ValueError: + raise exception.Error(_("version should be an integer")) + + current_version = db_version() + repository = _find_migrate_repo() + if version is None or version > current_version: + return versioning_api.upgrade(get_engine(), repository, version) + else: + return versioning_api.downgrade(get_engine(), repository, + version) + + +def db_version(): + repository = _find_migrate_repo() + try: + return versioning_api.db_version(get_engine(), repository) + except versioning_exceptions.DatabaseNotControlledError: + # If we aren't version controlled we may already have the database + # in the state from before we started version control, check for that + # and set up version_control appropriately + meta = sqlalchemy.MetaData() + engine = get_engine() + meta.reflect(bind=engine) + try: + for table in ('auth_tokens', 'zones', 'export_devices', + 'fixed_ips', 'floating_ips', 'instances', + 'key_pairs', 'networks', 'projects', 'quotas', + 'security_group_instance_association', + 'security_group_rules', 'security_groups', + 'services', 'migrations', + 'users', 'user_project_association', + 'user_project_role_association', + 'user_role_association', + 'virtual_storage_arrays', + 'volumes', 'volume_metadata', + 'volume_types', 'volume_type_extra_specs'): + assert table in meta.tables + return db_version_control(1) + except AssertionError: + return db_version_control(0) + + +def db_version_control(version=None): + repository = _find_migrate_repo() + versioning_api.version_control(get_engine(), repository, version) + return version + + +def _find_migrate_repo(): + """Get the path for the migrate repository.""" + global _REPOSITORY + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'migrate_repo') + assert os.path.exists(path) + if _REPOSITORY is None: + _REPOSITORY = Repository(path) + return _REPOSITORY diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py new file mode 100644 index 00000000000..732e6832f0e --- /dev/null +++ b/cinder/db/sqlalchemy/models.py @@ -0,0 +1,1063 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business 
unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Piston Cloud Computing, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +SQLAlchemy models for cinder data. +""" + +from sqlalchemy.orm import relationship, backref, object_mapper +from sqlalchemy import Column, Integer, BigInteger, String, schema +from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float +from sqlalchemy.exc import IntegrityError +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.schema import ForeignKeyConstraint + +from cinder.db.sqlalchemy.session import get_session + +from cinder import exception +from cinder import flags +from cinder import utils + + +FLAGS = flags.FLAGS +BASE = declarative_base() + + +class CinderBase(object): + """Base class for Cinder Models.""" + __table_args__ = {'mysql_engine': 'InnoDB'} + __table_initialized__ = False + created_at = Column(DateTime, default=utils.utcnow) + updated_at = Column(DateTime, onupdate=utils.utcnow) + deleted_at = Column(DateTime) + deleted = Column(Boolean, default=False) + metadata = None + + def save(self, session=None): + """Save this object.""" + if not session: + session = get_session() + session.add(self) + try: + session.flush() + except IntegrityError, e: + if str(e).endswith('is not unique'): + raise exception.Duplicate(str(e)) + else: + raise + + def delete(self, session=None): + """Delete this object.""" + self.deleted = True + self.deleted_at = utils.utcnow() + self.save(session=session) + + def __setitem__(self, key, value): + setattr(self, key, value) + + def __getitem__(self, key): + return getattr(self, key) + + def get(self, key, default=None): + return getattr(self, key, default) + + def __iter__(self): + self._i = iter(object_mapper(self).columns) + return self + + def next(self): + n = self._i.next().name + return n, getattr(self, n) + + def update(self, values): + """Make the model object behave like a dict""" + for k, v in values.iteritems(): + setattr(self, k, v) + + def iteritems(self): + """Make the model object behave like a dict. 
+ + Includes attributes from joins.""" + local = dict(self) + joined = dict([(k, v) for k, v in self.__dict__.iteritems() + if not k[0] == '_']) + local.update(joined) + return local.iteritems() + + +class Service(BASE, CinderBase): + """Represents a running service on a host.""" + + __tablename__ = 'services' + id = Column(Integer, primary_key=True) + host = Column(String(255)) # , ForeignKey('hosts.id')) + binary = Column(String(255)) + topic = Column(String(255)) + report_count = Column(Integer, nullable=False, default=0) + disabled = Column(Boolean, default=False) + availability_zone = Column(String(255), default='cinder') + + +class ComputeNode(BASE, CinderBase): + """Represents a running compute service on a host.""" + + __tablename__ = 'compute_nodes' + id = Column(Integer, primary_key=True) + service_id = Column(Integer, ForeignKey('services.id'), nullable=True) + service = relationship(Service, + backref=backref('compute_node'), + foreign_keys=service_id, + primaryjoin='and_(' + 'ComputeNode.service_id == Service.id,' + 'ComputeNode.deleted == False)') + + vcpus = Column(Integer) + memory_mb = Column(Integer) + local_gb = Column(Integer) + vcpus_used = Column(Integer) + memory_mb_used = Column(Integer) + local_gb_used = Column(Integer) + hypervisor_type = Column(Text) + hypervisor_version = Column(Integer) + hypervisor_hostname = Column(String(255)) + + # Free RAM, amount of activity (resize, migration, boot, etc) and + # the number of running VMs are a good starting point for what's + # important when making scheduling decisions. + # + # NOTE(sandy): We'll need to make this extensible for other schedulers. + free_ram_mb = Column(Integer) + free_disk_gb = Column(Integer) + current_workload = Column(Integer) + running_vms = Column(Integer) + + # Note(masumotok): Expected Strings example: + # + # '{"arch":"x86_64", + # "model":"Nehalem", + # "topology":{"sockets":1, "threads":2, "cores":3}, + # "features":["tdtscp", "xtpr"]}' + # + # Points are "json translatable" and it must have all dictionary keys + # above, since it is copied from the <cpu> tag of getCapabilities() + # (See libvirt.virtConnection). + cpu_info = Column(Text, nullable=True) + disk_available_least = Column(Integer) + + +class Certificate(BASE, CinderBase): + """Represents an x509 certificate""" + __tablename__ = 'certificates' + id = Column(Integer, primary_key=True) + + user_id = Column(String(255)) + project_id = Column(String(255)) + file_name = Column(String(255)) + + +class Instance(BASE, CinderBase): + """Represents a guest vm.""" + __tablename__ = 'instances' + injected_files = [] + + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + try: + base_name = FLAGS.instance_name_template % self.id + except TypeError: + # Support templates like "uuid-%(uuid)s", etc. + info = {} + for key, value in self.iteritems(): + # prevent recursion if someone specifies %(name)s + # %(name)s will not be valid. 
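+ # For example, with a template such as 'instance-%08x' an id of 10
+ # renders as 'instance-0000000a', while a dict-style template such
+ # as 'uuid-%(uuid)s' raises TypeError above and takes this
+ # fallback path instead.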
+ if key == 'name': + continue + info[key] = value + try: + base_name = FLAGS.instance_name_template % info + except KeyError: + base_name = self.uuid + if getattr(self, '_rescue', False): + base_name += "-rescue" + return base_name + + user_id = Column(String(255)) + project_id = Column(String(255)) + + image_ref = Column(String(255)) + kernel_id = Column(String(255)) + ramdisk_id = Column(String(255)) + server_name = Column(String(255)) + +# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True) +# kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) +# ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) +# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) +# kernel = relationship(Kernel, backref=backref('instances', order_by=id)) +# project = relationship(Project, backref=backref('instances', order_by=id)) + + launch_index = Column(Integer) + key_name = Column(String(255)) + key_data = Column(Text) + + power_state = Column(Integer) + vm_state = Column(String(255)) + task_state = Column(String(255)) + + memory_mb = Column(Integer) + vcpus = Column(Integer) + root_gb = Column(Integer) + ephemeral_gb = Column(Integer) + + hostname = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) + + # *not* flavor_id + instance_type_id = Column(Integer) + + user_data = Column(Text) + + reservation_id = Column(String(255)) + + scheduled_at = Column(DateTime) + launched_at = Column(DateTime) + terminated_at = Column(DateTime) + + availability_zone = Column(String(255)) + + # User editable field for display in user-facing UIs + display_name = Column(String(255)) + display_description = Column(String(255)) + + # To remember on which host an instance booted. + # An instance may have moved to another host by live migration. 
+ launched_on = Column(Text) + locked = Column(Boolean) + + os_type = Column(String(255)) + architecture = Column(String(255)) + vm_mode = Column(String(255)) + uuid = Column(String(36)) + + root_device_name = Column(String(255)) + default_ephemeral_device = Column(String(255), nullable=True) + default_swap_device = Column(String(255), nullable=True) + config_drive = Column(String(255)) + + # User editable field meant to represent what ip should be used + # to connect to the instance + access_ip_v4 = Column(String(255)) + access_ip_v6 = Column(String(255)) + + auto_disk_config = Column(Boolean()) + progress = Column(Integer) + + # EC2 instance_initiated_shutdown_terminate + # True: -> 'terminate' + # False: -> 'stop' + shutdown_terminate = Column(Boolean(), default=True, nullable=False) + + # EC2 disable_api_termination + disable_terminate = Column(Boolean(), default=False, nullable=False) + + # OpenStack compute cell name + cell_name = Column(String(255)) + + +class InstanceInfoCache(BASE, CinderBase): + """ + Represents a cache of information about an instance + """ + __tablename__ = 'instance_info_caches' + id = Column(Integer, primary_key=True, autoincrement=True) + + # text column used for storing a json object of network data for api + network_info = Column(Text) + + instance_id = Column(String(36), ForeignKey('instances.uuid'), + nullable=False, unique=True) + instance = relationship(Instance, + backref=backref('info_cache', uselist=False), + foreign_keys=instance_id, + primaryjoin=instance_id == Instance.uuid) + + +class InstanceActions(BASE, CinderBase): + """Represents a guest VM's actions and results""" + __tablename__ = "instance_actions" + id = Column(Integer, primary_key=True) + instance_uuid = Column(String(36), ForeignKey('instances.uuid')) + action = Column(String(255)) + error = Column(Text) + + +class InstanceTypes(BASE, CinderBase): + """Represents possible instance_types (flavors) of VMs offered""" + __tablename__ = "instance_types" + id = Column(Integer, primary_key=True) + name = Column(String(255)) + memory_mb = Column(Integer) + vcpus = Column(Integer) + root_gb = Column(Integer) + ephemeral_gb = Column(Integer) + flavorid = Column(String(255)) + swap = Column(Integer, nullable=False, default=0) + rxtx_factor = Column(Float, nullable=False, default=1) + vcpu_weight = Column(Integer, nullable=True) + + instances = relationship(Instance, + backref=backref('instance_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(' + 'Instance.instance_type_id == ' + 'InstanceTypes.id, ' + 'InstanceTypes.deleted == False)') + + +class Volume(BASE, CinderBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'volumes' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return FLAGS.volume_name_template % self.id + + ec2_id = Column(Integer) + user_id = Column(String(255)) + project_id = Column(String(255)) + + snapshot_id = Column(String(36)) + + host = Column(String(255)) # , ForeignKey('hosts.id')) + size = Column(Integer) + availability_zone = Column(String(255)) # TODO(vish): foreign key? + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) + instance = relationship(Instance, + backref=backref('volumes'), + foreign_keys=instance_id, + primaryjoin='and_(Volume.instance_id==Instance.id,' + 'Volume.deleted==False)') + mountpoint = Column(String(255)) + attach_time = Column(String(255)) # TODO(vish): datetime + status = Column(String(255)) # TODO(vish): enum? 
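+ # Typical free-form values at this point in the code base appear to
+ # be 'creating', 'available', 'in-use', 'deleting' and 'error' for
+ # status, and 'attached'/'detached' for attach_status below.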
+ attach_status = Column(String(255)) # TODO(vish): enum + + scheduled_at = Column(DateTime) + launched_at = Column(DateTime) + terminated_at = Column(DateTime) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + provider_location = Column(String(255)) + provider_auth = Column(String(255)) + + volume_type_id = Column(Integer) + + +class VolumeMetadata(BASE, CinderBase): + """Represents a metadata key/value pair for a volume""" + __tablename__ = 'volume_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False) + volume = relationship(Volume, backref="volume_metadata", + foreign_keys=volume_id, + primaryjoin='and_(' + 'VolumeMetadata.volume_id == Volume.id,' + 'VolumeMetadata.deleted == False)') + + +class VolumeTypes(BASE, CinderBase): + """Represents possible volume_types of volumes offered""" + __tablename__ = "volume_types" + id = Column(Integer, primary_key=True) + name = Column(String(255)) + + volumes = relationship(Volume, + backref=backref('volume_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(' + 'Volume.volume_type_id == VolumeTypes.id, ' + 'VolumeTypes.deleted == False)') + + +class VolumeTypeExtraSpecs(BASE, CinderBase): + """Represents additional specs as key/value pairs for a volume_type""" + __tablename__ = 'volume_type_extra_specs' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + volume_type_id = Column(Integer, ForeignKey('volume_types.id'), + nullable=False) + volume_type = relationship(VolumeTypes, backref="extra_specs", + foreign_keys=volume_type_id, + primaryjoin='and_(' + 'VolumeTypeExtraSpecs.volume_type_id == VolumeTypes.id,' + 'VolumeTypeExtraSpecs.deleted == False)') + + +class Quota(BASE, CinderBase): + """Represents a single quota override for a project. + + If there is no row for a given project id and resource, then the + default for the quota class is used. If there is no row for a + given quota class and resource, then the default for the + deployment is used. If the row is present but the hard limit is + Null, then the resource is unlimited. + """ + + __tablename__ = 'quotas' + id = Column(Integer, primary_key=True) + + project_id = Column(String(255), index=True) + + resource = Column(String(255)) + hard_limit = Column(Integer, nullable=True) + + +class QuotaClass(BASE, CinderBase): + """Represents a single quota override for a quota class. + + If there is no row for a given quota class and resource, then the + default for the deployment is used. If the row is present but the + hard limit is Null, then the resource is unlimited. 
+ """ + + __tablename__ = 'quota_classes' + id = Column(Integer, primary_key=True) + + class_name = Column(String(255), index=True) + + resource = Column(String(255)) + hard_limit = Column(Integer, nullable=True) + + +class Snapshot(BASE, CinderBase): + """Represents a block storage device that can be attached to a vm.""" + __tablename__ = 'snapshots' + id = Column(String(36), primary_key=True) + + @property + def name(self): + return FLAGS.snapshot_name_template % self.id + + @property + def volume_name(self): + return FLAGS.volume_name_template % self.volume_id + + user_id = Column(String(255)) + project_id = Column(String(255)) + + volume_id = Column(String(36)) + status = Column(String(255)) + progress = Column(String(255)) + volume_size = Column(Integer) + + display_name = Column(String(255)) + display_description = Column(String(255)) + + +class BlockDeviceMapping(BASE, CinderBase): + """Represents block device mapping that is defined by EC2""" + __tablename__ = "block_device_mapping" + id = Column(Integer, primary_key=True, autoincrement=True) + + instance_uuid = Column(Integer, ForeignKey('instances.uuid'), + nullable=False) + instance = relationship(Instance, + backref=backref('balock_device_mapping'), + foreign_keys=instance_uuid, + primaryjoin='and_(BlockDeviceMapping.' + 'instance_uuid==' + 'Instance.uuid,' + 'BlockDeviceMapping.deleted==' + 'False)') + device_name = Column(String(255), nullable=False) + + # default=False for compatibility of the existing code. + # With EC2 API, + # default True for ami specified device. + # default False for created with other timing. + delete_on_termination = Column(Boolean, default=False) + + # for ephemeral device + virtual_name = Column(String(255), nullable=True) + + # for snapshot or volume + snapshot_id = Column(String(36), ForeignKey('snapshots.id')) + # outer join + snapshot = relationship(Snapshot, + foreign_keys=snapshot_id) + + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True) + volume = relationship(Volume, + foreign_keys=volume_id) + volume_size = Column(Integer, nullable=True) + + # for no device to suppress devices. 
+ no_device = Column(Boolean, nullable=True) + + connection_info = Column(Text, nullable=True) + + +class IscsiTarget(BASE, CinderBase): + """Represents an iSCSI target for a given host""" + __tablename__ = 'iscsi_targets' + __table_args__ = (schema.UniqueConstraint("target_num", "host"), + {'mysql_engine': 'InnoDB'}) + id = Column(Integer, primary_key=True) + target_num = Column(Integer) + host = Column(String(255)) + volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=True) + volume = relationship(Volume, + backref=backref('iscsi_target', uselist=False), + foreign_keys=volume_id, + primaryjoin='and_(IscsiTarget.volume_id==Volume.id,' + 'IscsiTarget.deleted==False)') + + +class SecurityGroupInstanceAssociation(BASE, CinderBase): + __tablename__ = 'security_group_instance_association' + id = Column(Integer, primary_key=True) + security_group_id = Column(Integer, ForeignKey('security_groups.id')) + instance_id = Column(Integer, ForeignKey('instances.id')) + + +class SecurityGroup(BASE, CinderBase): + """Represents a security group.""" + __tablename__ = 'security_groups' + id = Column(Integer, primary_key=True) + + name = Column(String(255)) + description = Column(String(255)) + user_id = Column(String(255)) + project_id = Column(String(255)) + + instances = relationship(Instance, + secondary="security_group_instance_association", + primaryjoin='and_(' + 'SecurityGroup.id == ' + 'SecurityGroupInstanceAssociation.security_group_id,' + 'SecurityGroupInstanceAssociation.deleted == False,' + 'SecurityGroup.deleted == False)', + secondaryjoin='and_(' + 'SecurityGroupInstanceAssociation.instance_id == Instance.id,' + # (anthony) the condition below shouldn't be necessary now that the + # association is being marked as deleted. However, removing this + # may cause existing deployments to choke, so I'm leaving it + 'Instance.deleted == False)', + backref='security_groups') + + +class SecurityGroupIngressRule(BASE, CinderBase): + """Represents a rule in a security group.""" + __tablename__ = 'security_group_rules' + id = Column(Integer, primary_key=True) + + parent_group_id = Column(Integer, ForeignKey('security_groups.id')) + parent_group = relationship("SecurityGroup", backref="rules", + foreign_keys=parent_group_id, + primaryjoin='and_(' + 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,' + 'SecurityGroupIngressRule.deleted == False)') + + protocol = Column(String(5)) # "tcp", "udp", or "icmp" + from_port = Column(Integer) + to_port = Column(Integer) + cidr = Column(String(255)) + + # Note: This is not the parent SecurityGroup. It's the SecurityGroup + # we're granting access for. 
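+ # For example, a rule on group A allowing ssh from members of group B
+ # would carry parent_group_id=A.id and group_id=B.id, with cidr left
+ # unset (a grantee group and a cidr are alternative sources).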
+ group_id = Column(Integer, ForeignKey('security_groups.id')) + grantee_group = relationship("SecurityGroup", + foreign_keys=group_id, + primaryjoin='and_(' + 'SecurityGroupIngressRule.group_id == SecurityGroup.id,' + 'SecurityGroupIngressRule.deleted == False)') + + +class ProviderFirewallRule(BASE, CinderBase): + """Represents a rule in the provider-level firewall.""" + __tablename__ = 'provider_fw_rules' + id = Column(Integer, primary_key=True) + + protocol = Column(String(5)) # "tcp", "udp", or "icmp" + from_port = Column(Integer) + to_port = Column(Integer) + cidr = Column(String(255)) + + +class KeyPair(BASE, CinderBase): + """Represents a public key pair for ssh.""" + __tablename__ = 'key_pairs' + id = Column(Integer, primary_key=True) + + name = Column(String(255)) + + user_id = Column(String(255)) + + fingerprint = Column(String(255)) + public_key = Column(Text) + + +class Migration(BASE, CinderBase): + """Represents a running host-to-host migration.""" + __tablename__ = 'migrations' + id = Column(Integer, primary_key=True, nullable=False) + # NOTE(tr3buchet): the ____compute variables are instance['host'] + source_compute = Column(String(255)) + dest_compute = Column(String(255)) + # NOTE(tr3buchet): dest_host, btw, is an ip address + dest_host = Column(String(255)) + old_instance_type_id = Column(Integer()) + new_instance_type_id = Column(Integer()) + instance_uuid = Column(String(255), ForeignKey('instances.uuid'), + nullable=True) + #TODO(_cerberus_): enum + status = Column(String(255)) + + +class Network(BASE, CinderBase): + """Represents a network.""" + __tablename__ = 'networks' + __table_args__ = (schema.UniqueConstraint("vpn_public_address", + "vpn_public_port"), + {'mysql_engine': 'InnoDB'}) + id = Column(Integer, primary_key=True) + label = Column(String(255)) + + injected = Column(Boolean, default=False) + cidr = Column(String(255), unique=True) + cidr_v6 = Column(String(255), unique=True) + multi_host = Column(Boolean, default=False) + + gateway_v6 = Column(String(255)) + netmask_v6 = Column(String(255)) + netmask = Column(String(255)) + bridge = Column(String(255)) + bridge_interface = Column(String(255)) + gateway = Column(String(255)) + broadcast = Column(String(255)) + dns1 = Column(String(255)) + dns2 = Column(String(255)) + + vlan = Column(Integer) + vpn_public_address = Column(String(255)) + vpn_public_port = Column(Integer) + vpn_private_address = Column(String(255)) + dhcp_start = Column(String(255)) + + rxtx_base = Column(Integer) + + project_id = Column(String(255)) + priority = Column(Integer) + host = Column(String(255)) # , ForeignKey('hosts.id')) + uuid = Column(String(36)) + + +class VirtualInterface(BASE, CinderBase): + """Represents a virtual interface on an instance.""" + __tablename__ = 'virtual_interfaces' + id = Column(Integer, primary_key=True) + address = Column(String(255), unique=True) + network_id = Column(Integer, nullable=False) + instance_id = Column(Integer, nullable=False) + uuid = Column(String(36)) + + +# TODO(vish): can these both come from the same baseclass? 
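+# One shape an answer to that TODO could take (an illustrative sketch
+# only; a plain declarative mixin, assuming the shared columns stay
+# limited to address/host):
+#
+#     class IpBase(object):
+#         address = Column(String(255))
+#         host = Column(String(255))
+#
+#     class FixedIp(BASE, CinderBase, IpBase):
+#         ...
+#
+#     class FloatingIp(BASE, CinderBase, IpBase):
+#         ...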
+class FixedIp(BASE, CinderBase): + """Represents a fixed ip for an instance.""" + __tablename__ = 'fixed_ips' + id = Column(Integer, primary_key=True) + address = Column(String(255)) + network_id = Column(Integer, nullable=True) + virtual_interface_id = Column(Integer, nullable=True) + instance_id = Column(Integer, nullable=True) + # associated means that a fixed_ip has its instance_id column set + # allocated means that a fixed_ip has its virtual_interface_id column set + allocated = Column(Boolean, default=False) + # leased means dhcp bridge has leased the ip + leased = Column(Boolean, default=False) + reserved = Column(Boolean, default=False) + host = Column(String(255)) + + +class FloatingIp(BASE, CinderBase): + """Represents a floating ip that dynamically forwards to a fixed ip.""" + __tablename__ = 'floating_ips' + id = Column(Integer, primary_key=True) + address = Column(String(255)) + fixed_ip_id = Column(Integer, nullable=True) + project_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) + auto_assigned = Column(Boolean, default=False, nullable=False) + pool = Column(String(255)) + interface = Column(String(255)) + + +class AuthToken(BASE, CinderBase): + """Represents an authorization token for all API transactions. + + Fields are a string representing the actual token and a user id for + mapping to the actual user. + + """ + __tablename__ = 'auth_tokens' + token_hash = Column(String(255), primary_key=True) + user_id = Column(String(255)) + server_management_url = Column(String(255)) + storage_url = Column(String(255)) + cdn_management_url = Column(String(255)) + + +class User(BASE, CinderBase): + """Represents a user.""" + __tablename__ = 'users' + id = Column(String(255), primary_key=True) + + name = Column(String(255)) + access_key = Column(String(255)) + secret_key = Column(String(255)) + + is_admin = Column(Boolean) + + +class Project(BASE, CinderBase): + """Represents a project.""" + __tablename__ = 'projects' + id = Column(String(255), primary_key=True) + name = Column(String(255)) + description = Column(String(255)) + + project_manager = Column(String(255), ForeignKey(User.id)) + + members = relationship(User, + secondary='user_project_association', + backref='projects') + + +class DNSDomain(BASE, CinderBase): + """Represents a DNS domain with availability zone or project info.""" + __tablename__ = 'dns_domains' + domain = Column(String(512), primary_key=True) + scope = Column(String(255)) + availability_zone = Column(String(255)) + project_id = Column(String(255)) + project = relationship(Project, + primaryjoin=project_id == Project.id, + foreign_keys=[Project.id], + uselist=False) + + +class UserProjectRoleAssociation(BASE, CinderBase): + __tablename__ = 'user_project_role_association' + user_id = Column(String(255), primary_key=True) + user = relationship(User, + primaryjoin=user_id == User.id, + foreign_keys=[User.id], + uselist=False) + + project_id = Column(String(255), primary_key=True) + project = relationship(Project, + primaryjoin=project_id == Project.id, + foreign_keys=[Project.id], + uselist=False) + + role = Column(String(255), primary_key=True) + ForeignKeyConstraint(['user_id', + 'project_id'], + ['user_project_association.user_id', + 'user_project_association.project_id']) + + +class UserRoleAssociation(BASE, CinderBase): + __tablename__ = 'user_role_association' + user_id = Column(String(255), ForeignKey('users.id'), primary_key=True) + user = relationship(User, backref='roles') + role = Column(String(255), 
primary_key=True) + + +class UserProjectAssociation(BASE, CinderBase): + __tablename__ = 'user_project_association' + user_id = Column(String(255), ForeignKey(User.id), primary_key=True) + project_id = Column(String(255), ForeignKey(Project.id), primary_key=True) + + +class ConsolePool(BASE, CinderBase): + """Represents pool of consoles on the same physical node.""" + __tablename__ = 'console_pools' + id = Column(Integer, primary_key=True) + address = Column(String(255)) + username = Column(String(255)) + password = Column(String(255)) + console_type = Column(String(255)) + public_hostname = Column(String(255)) + host = Column(String(255)) + compute_host = Column(String(255)) + + +class Console(BASE, CinderBase): + """Represents a console session for an instance.""" + __tablename__ = 'consoles' + id = Column(Integer, primary_key=True) + instance_name = Column(String(255)) + instance_id = Column(Integer) + password = Column(String(255)) + port = Column(Integer, nullable=True) + pool_id = Column(Integer, ForeignKey('console_pools.id')) + pool = relationship(ConsolePool, backref=backref('consoles')) + + +class InstanceMetadata(BASE, CinderBase): + """Represents a metadata key/value pair for an instance""" + __tablename__ = 'instance_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) + instance = relationship(Instance, backref="metadata", + foreign_keys=instance_id, + primaryjoin='and_(' + 'InstanceMetadata.instance_id == Instance.id,' + 'InstanceMetadata.deleted == False)') + + +class InstanceTypeExtraSpecs(BASE, CinderBase): + """Represents additional specs as key/value pairs for an instance_type""" + __tablename__ = 'instance_type_extra_specs' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + instance_type_id = Column(Integer, ForeignKey('instance_types.id'), + nullable=False) + instance_type = relationship(InstanceTypes, backref="extra_specs", + foreign_keys=instance_type_id, + primaryjoin='and_(' + 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,' + 'InstanceTypeExtraSpecs.deleted == False)') + + +class Cell(BASE, CinderBase): + """Represents parent and child cells of this cell.""" + __tablename__ = 'cells' + id = Column(Integer, primary_key=True) + name = Column(String(255)) + api_url = Column(String(255)) + username = Column(String(255)) + password = Column(String(255)) + weight_offset = Column(Float(), default=0.0) + weight_scale = Column(Float(), default=1.0) + is_parent = Column(Boolean()) + rpc_host = Column(String(255)) + rpc_port = Column(Integer()) + rpc_virtual_host = Column(String(255)) + + +class AggregateHost(BASE, CinderBase): + """Represents a host that is member of an aggregate.""" + __tablename__ = 'aggregate_hosts' + id = Column(Integer, primary_key=True, autoincrement=True) + host = Column(String(255), unique=True) + aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False) + + +class AggregateMetadata(BASE, CinderBase): + """Represents a metadata key/value pair for an aggregate.""" + __tablename__ = 'aggregate_metadata' + id = Column(Integer, primary_key=True) + key = Column(String(255), nullable=False) + value = Column(String(255), nullable=False) + aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False) + + +class Aggregate(BASE, CinderBase): + """Represents a cluster of hosts that exists in this zone.""" + __tablename__ = 'aggregates' 
+ id = Column(Integer, primary_key=True, autoincrement=True) + name = Column(String(255), unique=True) + operational_state = Column(String(255), nullable=False) + availability_zone = Column(String(255), nullable=False) + _hosts = relationship(AggregateHost, + secondary="aggregate_hosts", + primaryjoin='and_(' + 'Aggregate.id == AggregateHost.aggregate_id,' + 'AggregateHost.deleted == False,' + 'Aggregate.deleted == False)', + secondaryjoin='and_(' + 'AggregateHost.aggregate_id == Aggregate.id, ' + 'AggregateHost.deleted == False,' + 'Aggregate.deleted == False)', + backref='aggregates') + + _metadata = relationship(AggregateMetadata, + secondary="aggregate_metadata", + primaryjoin='and_(' + 'Aggregate.id == AggregateMetadata.aggregate_id,' + 'AggregateMetadata.deleted == False,' + 'Aggregate.deleted == False)', + secondaryjoin='and_(' + 'AggregateMetadata.aggregate_id == Aggregate.id, ' + 'AggregateMetadata.deleted == False,' + 'Aggregate.deleted == False)', + backref='aggregates') + + @property + def hosts(self): + return [h.host for h in self._hosts] + + @property + def metadetails(self): + return dict([(m.key, m.value) for m in self._metadata]) + + +class AgentBuild(BASE, CinderBase): + """Represents an agent build.""" + __tablename__ = 'agent_builds' + id = Column(Integer, primary_key=True) + hypervisor = Column(String(255)) + os = Column(String(255)) + architecture = Column(String(255)) + version = Column(String(255)) + url = Column(String(255)) + md5hash = Column(String(255)) + + +class BandwidthUsage(BASE, CinderBase): + """Cache for instance bandwidth usage data pulled from the hypervisor""" + __tablename__ = 'bw_usage_cache' + id = Column(Integer, primary_key=True, nullable=False) + uuid = Column(String(36), nullable=False) + mac = Column(String(255), nullable=False) + start_period = Column(DateTime, nullable=False) + last_refreshed = Column(DateTime) + bw_in = Column(BigInteger) + bw_out = Column(BigInteger) + + +class S3Image(BASE, CinderBase): + """Compatibility layer for the S3 image service talking to Glance""" + __tablename__ = 's3_images' + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + uuid = Column(String(36), nullable=False) + + +class VolumeIdMapping(BASE, CinderBase): + """Compatibility layer for the EC2 volume service""" + __tablename__ = 'volume_id_mappings' + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + uuid = Column(String(36), nullable=False) + + +class SnapshotIdMapping(BASE, CinderBase): + """Compatibility layer for the EC2 snapshot service""" + __tablename__ = 'snapshot_id_mappings' + id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) + uuid = Column(String(36), nullable=False) + + +class SMFlavors(BASE, CinderBase): + """Represents a flavor for SM volumes.""" + __tablename__ = 'sm_flavors' + id = Column(Integer(), primary_key=True) + label = Column(String(255)) + description = Column(String(255)) + + +class SMBackendConf(BASE, CinderBase): + """Represents the connection to the backend for SM.""" + __tablename__ = 'sm_backend_config' + id = Column(Integer(), primary_key=True) + flavor_id = Column(Integer, ForeignKey('sm_flavors.id'), nullable=False) + sr_uuid = Column(String(255)) + sr_type = Column(String(255)) + config_params = Column(String(2047)) + + +class SMVolume(BASE, CinderBase): + __tablename__ = 'sm_volume' + id = Column(String(36), ForeignKey(Volume.id), primary_key=True) + backend_id = Column(Integer, ForeignKey('sm_backend_config.id'), + nullable=False) + 
vdi_uuid = Column(String(255)) + + +class InstanceFault(BASE, CinderBase): + __tablename__ = 'instance_faults' + id = Column(Integer(), primary_key=True, autoincrement=True) + instance_uuid = Column(String(36), + ForeignKey('instances.uuid'), + nullable=False) + code = Column(Integer(), nullable=False) + message = Column(String(255)) + details = Column(Text) + + +def register_models(): + """Register Models and create metadata. + + Called from cinder.db.sqlalchemy.__init__ as part of loading the driver, + it will never need to be called explicitly elsewhere unless the + connection is lost and needs to be reestablished. + """ + from sqlalchemy import create_engine + models = (AgentBuild, + Aggregate, + AggregateHost, + AggregateMetadata, + AuthToken, + Certificate, + Cell, + Console, + ConsolePool, + FixedIp, + FloatingIp, + Instance, + InstanceActions, + InstanceFault, + InstanceMetadata, + InstanceTypeExtraSpecs, + InstanceTypes, + IscsiTarget, + Migration, + Network, + Project, + SecurityGroup, + SecurityGroupIngressRule, + SecurityGroupInstanceAssociation, + Service, + SMBackendConf, + SMFlavors, + SMVolume, + User, + Volume, + VolumeMetadata, + VolumeTypeExtraSpecs, + VolumeTypes, + VolumeIdMapping, + SnapshotIdMapping, + ) + engine = create_engine(FLAGS.sql_connection, echo=False) + for model in models: + model.metadata.create_all(engine) diff --git a/cinder/db/sqlalchemy/session.py b/cinder/db/sqlalchemy/session.py new file mode 100644 index 00000000000..fd6eef4ba45 --- /dev/null +++ b/cinder/db/sqlalchemy/session.py @@ -0,0 +1,156 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Session Handling for SQLAlchemy backend.""" + +import time + +import sqlalchemy.interfaces +import sqlalchemy.orm +from sqlalchemy.exc import DisconnectionError, OperationalError +from sqlalchemy.pool import NullPool, StaticPool + +import cinder.exception +import cinder.flags as flags +import cinder.log as logging + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + +_ENGINE = None +_MAKER = None + + +def get_session(autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy session.""" + global _MAKER + + if _MAKER is None: + engine = get_engine() + _MAKER = get_maker(engine, autocommit, expire_on_commit) + + session = _MAKER() + session.query = cinder.exception.wrap_db_error(session.query) + session.flush = cinder.exception.wrap_db_error(session.flush) + return session + + +class SynchronousSwitchListener(sqlalchemy.interfaces.PoolListener): + + """Switch sqlite connections to non-synchronous mode""" + + def connect(self, dbapi_con, con_record): + dbapi_con.execute("PRAGMA synchronous = OFF") + + +class MySQLPingListener(object): + + """ + Ensures that MySQL connections checked out of the + pool are alive. 
+ + Borrowed from: + http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f + """ + + def checkout(self, dbapi_con, con_record, con_proxy): + try: + dbapi_con.cursor().execute('select 1') + except dbapi_con.OperationalError, ex: + if ex.args[0] in (2006, 2013, 2014, 2045, 2055): + LOG.warn('Got mysql server has gone away: %s', ex) + raise DisconnectionError("Database server went away") + else: + raise + + +def is_db_connection_error(args): + """Return True if error in connecting to db.""" + # NOTE(adam_g): This is currently MySQL specific and needs to be extended + # to support Postgres and others. + conn_err_codes = ('2002', '2003', '2006') + for err_code in conn_err_codes: + if args.find(err_code) != -1: + return True + return False + + +def get_engine(): + """Return a SQLAlchemy engine.""" + global _ENGINE + if _ENGINE is None: + connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection) + + engine_args = { + "pool_recycle": FLAGS.sql_idle_timeout, + "echo": False, + 'convert_unicode': True, + } + + # Map our SQL debug level to SQLAlchemy's options + if FLAGS.sql_connection_debug >= 100: + engine_args['echo'] = 'debug' + elif FLAGS.sql_connection_debug >= 50: + engine_args['echo'] = True + + if "sqlite" in connection_dict.drivername: + engine_args["poolclass"] = NullPool + + if FLAGS.sql_connection == "sqlite://": + engine_args["poolclass"] = StaticPool + engine_args["connect_args"] = {'check_same_thread': False} + + if not FLAGS.sqlite_synchronous: + engine_args["listeners"] = [SynchronousSwitchListener()] + + if 'mysql' in connection_dict.drivername: + engine_args['listeners'] = [MySQLPingListener()] + + _ENGINE = sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args) + + try: + _ENGINE.connect() + except OperationalError, e: + if not is_db_connection_error(e.args[0]): + raise + + remaining = FLAGS.sql_max_retries + if remaining == -1: + remaining = 'infinite' + while True: + msg = _('SQL connection failed. %s attempts left.') + LOG.warn(msg % remaining) + if remaining != 'infinite': + remaining -= 1 + time.sleep(FLAGS.sql_retry_interval) + try: + _ENGINE.connect() + break + except OperationalError, e: + if (remaining != 'infinite' and remaining == 0) or \ + not is_db_connection_error(e.args[0]): + raise + return _ENGINE + + +def get_maker(engine, autocommit=True, expire_on_commit=False): + """Return a SQLAlchemy sessionmaker using the given engine.""" + return sqlalchemy.orm.sessionmaker(bind=engine, + autocommit=autocommit, + expire_on_commit=expire_on_commit) diff --git a/cinder/exception.py b/cinder/exception.py new file mode 100644 index 00000000000..b57c1925ea5 --- /dev/null +++ b/cinder/exception.py @@ -0,0 +1,938 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Cinder base exception handling. 
+ +Includes decorator for re-raising Cinder-type exceptions. + +SHOULD include dedicated exception logging. + +""" + +import functools +import sys + +import webob.exc + +from cinder import log as logging + +LOG = logging.getLogger(__name__) + + +class ConvertedException(webob.exc.WSGIHTTPException): + def __init__(self, code=0, title="", explanation=""): + self.code = code + self.title = title + self.explanation = explanation + super(ConvertedException, self).__init__() + + +class ProcessExecutionError(IOError): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, + description=None): + self.exit_code = exit_code + self.stderr = stderr + self.stdout = stdout + self.cmd = cmd + self.description = description + + if description is None: + description = _('Unexpected error while running command.') + if exit_code is None: + exit_code = '-' + message = _('%(description)s\nCommand: %(cmd)s\n' + 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' + 'Stderr: %(stderr)r') % locals() + IOError.__init__(self, message) + + +class Error(Exception): + pass + + +class DBError(Error): + """Wraps an implementation specific exception.""" + def __init__(self, inner_exception=None): + self.inner_exception = inner_exception + super(DBError, self).__init__(str(inner_exception)) + + +def wrap_db_error(f): + def _wrap(*args, **kwargs): + try: + return f(*args, **kwargs) + except UnicodeEncodeError: + raise InvalidUnicodeParameter() + except Exception, e: + LOG.exception(_('DB exception wrapped.')) + raise DBError(e) + _wrap.func_name = f.func_name + return _wrap + + +def wrap_exception(notifier=None, publisher_id=None, event_type=None, + level=None): + """This decorator wraps a method to catch any exceptions that may + get thrown. It logs the exception as well as optionally sending + it to the notification system. + """ + # TODO(sandy): Find a way to import cinder.notifier.api so we don't have + # to pass it in as a parameter. Otherwise we get a cyclic import of + # cinder.notifier.api -> cinder.utils -> cinder.exception :( + # TODO(johannes): Also, it would be nice to use + # utils.save_and_reraise_exception() without an import loop + def inner(f): + def wrapped(*args, **kw): + try: + return f(*args, **kw) + except Exception, e: + # Save exception since it can be clobbered during processing + # below before we can re-raise + exc_info = sys.exc_info() + + if notifier: + payload = dict(args=args, exception=e) + payload.update(kw) + + # Use temp vars so we don't shadow + # our outer definitions. + temp_level = level + if not temp_level: + temp_level = notifier.ERROR + + temp_type = event_type + if not temp_type: + # If f has multiple decorators, they must use + # functools.wraps to ensure the name is + # propagated. + temp_type = f.__name__ + + notifier.notify(publisher_id, temp_type, temp_level, + payload) + + # re-raise original exception since it may have been clobbered + raise exc_info[0], exc_info[1], exc_info[2] + + return functools.wraps(f)(wrapped) + return inner + + +class CinderException(Exception): + """Base Cinder Exception + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. 
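+
+    For example, using a subclass defined later in this module
+    (illustrative)::
+
+        class VolumeNotFound(NotFound):
+            message = _("Volume %(volume_id)s could not be found.")
+
+        raise VolumeNotFound(volume_id='vol-1')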
+ + """ + message = _("An unknown exception occurred.") + + def __init__(self, message=None, **kwargs): + self.kwargs = kwargs + + if 'code' not in self.kwargs: + try: + self.kwargs['code'] = self.code + except AttributeError: + pass + + if not message: + try: + message = self.message % kwargs + + except Exception as e: + # kwargs doesn't match a variable in the message + # log the issue and the kwargs + LOG.exception(_('Exception in string format operation')) + for name, value in kwargs.iteritems(): + LOG.error("%s: %s" % (name, value)) + # at least get the core message out if something happened + message = self.message + + super(CinderException, self).__init__(message) + + +class DecryptionFailure(CinderException): + message = _("Failed to decrypt text") + + +class ImagePaginationFailed(CinderException): + message = _("Failed to paginate through images from image service") + + +class VirtualInterfaceCreateException(CinderException): + message = _("Virtual Interface creation failed") + + +class VirtualInterfaceMacAddressException(CinderException): + message = _("5 attempts to create virtual interface" + "with unique mac address failed") + + +class GlanceConnectionFailed(CinderException): + message = _("Connection to glance failed") + ": %(reason)s" + + +class MelangeConnectionFailed(CinderException): + message = _("Connection to melange failed") + ": %(reason)s" + + +class NotAuthorized(CinderException): + message = _("Not authorized.") + code = 403 + + +class AdminRequired(NotAuthorized): + message = _("User does not have admin privileges") + + +class PolicyNotAuthorized(NotAuthorized): + message = _("Policy doesn't allow %(action)s to be performed.") + + +class ImageNotAuthorized(CinderException): + message = _("Not authorized for image %(image_id)s.") + + +class Invalid(CinderException): + message = _("Unacceptable parameters.") + code = 400 + + +class InvalidSnapshot(Invalid): + message = _("Invalid snapshot") + ": %(reason)s" + + +class VolumeUnattached(Invalid): + message = _("Volume %(volume_id)s is not attached to anything") + + +class InvalidKeypair(Invalid): + message = _("Keypair data is invalid") + + +class SfJsonEncodeFailure(CinderException): + message = _("Failed to load data into json format") + + +class InvalidRequest(Invalid): + message = _("The request is invalid.") + + +class InvalidSignature(Invalid): + message = _("Invalid signature %(signature)s for user %(user)s.") + + +class InvalidInput(Invalid): + message = _("Invalid input received") + ": %(reason)s" + + +class InvalidInstanceType(Invalid): + message = _("Invalid instance type %(instance_type)s.") + + +class InvalidVolumeType(Invalid): + message = _("Invalid volume type") + ": %(reason)s" + + +class InvalidVolume(Invalid): + message = _("Invalid volume") + ": %(reason)s" + + +class InvalidPortRange(Invalid): + message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s") + + +class InvalidIpProtocol(Invalid): + message = _("Invalid IP protocol %(protocol)s.") + + +class InvalidContentType(Invalid): + message = _("Invalid content type %(content_type)s.") + + +class InvalidCidr(Invalid): + message = _("Invalid cidr %(cidr)s.") + + +class InvalidRPCConnectionReuse(Invalid): + message = _("Invalid reuse of an RPC connection.") + + +class InvalidUnicodeParameter(Invalid): + message = _("Invalid Parameter: " + "Unicode is not supported by the current database.") + + +# Cannot be templated as the error syntax varies. +# msg needs to be constructed when raised. 
+class InvalidParameterValue(Invalid):
+    message = _("%(err)s")
+
+
+class InvalidAggregateAction(Invalid):
+    message = _("Cannot perform action '%(action)s' on aggregate "
+                "%(aggregate_id)s. Reason: %(reason)s.")
+
+
+class InvalidGroup(Invalid):
+    message = _("Group not valid. Reason: %(reason)s")
+
+
+class InstanceInvalidState(Invalid):
+    message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
+                "%(method)s while the instance is in this state.")
+
+
+class InstanceNotRunning(Invalid):
+    message = _("Instance %(instance_id)s is not running.")
+
+
+class InstanceNotSuspended(Invalid):
+    message = _("Instance %(instance_id)s is not suspended.")
+
+
+class InstanceNotInRescueMode(Invalid):
+    message = _("Instance %(instance_id)s is not in rescue mode")
+
+
+class InstanceSuspendFailure(Invalid):
+    message = _("Failed to suspend instance") + ": %(reason)s"
+
+
+class InstanceResumeFailure(Invalid):
+    message = _("Failed to resume server") + ": %(reason)s."
+
+
+class InstanceRebootFailure(Invalid):
+    message = _("Failed to reboot instance") + ": %(reason)s"
+
+
+class InstanceTerminationFailure(Invalid):
+    message = _("Failed to terminate instance") + ": %(reason)s"
+
+
+class ServiceUnavailable(Invalid):
+    message = _("Service is unavailable at this time.")
+
+
+class VolumeServiceUnavailable(ServiceUnavailable):
+    message = _("Volume service is unavailable at this time.")
+
+
+class UnableToMigrateToSelf(Invalid):
+    message = _("Unable to migrate instance (%(instance_id)s) "
+                "to current host (%(host)s).")
+
+
+class DestinationHostUnavailable(Invalid):
+    message = _("Destination compute host is unavailable at this time.")
+
+
+class SourceHostUnavailable(Invalid):
+    message = _("Original compute host is unavailable at this time.")
+
+
+class InvalidHypervisorType(Invalid):
+    message = _("The supplied hypervisor type is invalid.")
+
+
+class DestinationHypervisorTooOld(Invalid):
+    message = _("The instance requires a newer hypervisor version than "
+                "has been provided.")
+
+
+class DestinationDiskExists(Invalid):
+    message = _("The supplied disk path (%(path)s) already exists, "
+                "it is expected not to exist.")
+
+
+class InvalidDevicePath(Invalid):
+    message = _("The supplied device path (%(path)s) is invalid.")
+
+
+class DeviceIsBusy(Invalid):
+    message = _("The supplied device (%(device)s) is busy.")
+
+
+class InvalidCPUInfo(Invalid):
+    message = _("Unacceptable CPU info") + ": %(reason)s"
+
+
+class InvalidIpAddressError(Invalid):
+    message = _("%(address)s is not a valid IP v4/6 address.")
+
+
+class InvalidVLANTag(Invalid):
+    message = _("VLAN tag is not appropriate for the port group "
+                "%(bridge)s. Expected VLAN tag is %(tag)s, "
+                "but the one associated with the port group is %(pgroup)s.")
+
+
+class InvalidVLANPortGroup(Invalid):
+    message = _("vSwitch which contains the port group %(bridge)s is "
+                "not associated with the desired physical adapter. 
" + "Expected vSwitch is %(expected)s, but the one associated " + "is %(actual)s.") + + +class InvalidDiskFormat(Invalid): + message = _("Disk format %(disk_format)s is not acceptable") + + +class ImageUnacceptable(Invalid): + message = _("Image %(image_id)s is unacceptable: %(reason)s") + + +class InstanceUnacceptable(Invalid): + message = _("Instance %(instance_id)s is unacceptable: %(reason)s") + + +class NotFound(CinderException): + message = _("Resource could not be found.") + code = 404 + + +class FlagNotSet(NotFound): + message = _("Required flag %(flag)s not set.") + + +class VolumeNotFound(NotFound): + message = _("Volume %(volume_id)s could not be found.") + + +class SfAccountNotFound(NotFound): + message = _("Unable to locate account %(account_name)s on " + "Solidfire device") + + +class VolumeNotFoundForInstance(VolumeNotFound): + message = _("Volume not found for instance %(instance_id)s.") + + +class VolumeMetadataNotFound(NotFound): + message = _("Volume %(volume_id)s has no metadata with " + "key %(metadata_key)s.") + + +class NoVolumeTypesFound(NotFound): + message = _("Zero volume types found.") + + +class VolumeTypeNotFound(NotFound): + message = _("Volume type %(volume_type_id)s could not be found.") + + +class VolumeTypeNotFoundByName(VolumeTypeNotFound): + message = _("Volume type with name %(volume_type_name)s " + "could not be found.") + + +class VolumeTypeExtraSpecsNotFound(NotFound): + message = _("Volume Type %(volume_type_id)s has no extra specs with " + "key %(extra_specs_key)s.") + + +class SnapshotNotFound(NotFound): + message = _("Snapshot %(snapshot_id)s could not be found.") + + +class VolumeIsBusy(CinderException): + message = _("deleting volume %(volume_name)s that has snapshot") + + +class SnapshotIsBusy(CinderException): + message = _("deleting snapshot %(snapshot_name)s that has " + "dependent volumes") + + +class ISCSITargetNotFoundForVolume(NotFound): + message = _("No target id found for volume %(volume_id)s.") + + +class DiskNotFound(NotFound): + message = _("No disk at %(location)s") + + +class VolumeDriverNotFound(NotFound): + message = _("Could not find a handler for %(driver_type)s volume.") + + +class InvalidImageRef(Invalid): + message = _("Invalid image href %(image_href)s.") + + +class ListingImageRefsNotSupported(Invalid): + message = _("Some images have been stored via hrefs." 
+ " This version of the api does not support displaying image hrefs.") + + +class ImageNotFound(NotFound): + message = _("Image %(image_id)s could not be found.") + + +class KernelNotFoundForImage(ImageNotFound): + message = _("Kernel not found for image %(image_id)s.") + + +class UserNotFound(NotFound): + message = _("User %(user_id)s could not be found.") + + +class ProjectNotFound(NotFound): + message = _("Project %(project_id)s could not be found.") + + +class ProjectMembershipNotFound(NotFound): + message = _("User %(user_id)s is not a member of project %(project_id)s.") + + +class UserRoleNotFound(NotFound): + message = _("Role %(role_id)s could not be found.") + + +class StorageRepositoryNotFound(NotFound): + message = _("Cannot find SR to read/write VDI.") + + +class DatastoreNotFound(NotFound): + message = _("Could not find the datastore reference(s) which the VM uses.") + + +class FixedIpNotFound(NotFound): + message = _("No fixed IP associated with id %(id)s.") + + +class FixedIpNotFoundForAddress(FixedIpNotFound): + message = _("Fixed ip not found for address %(address)s.") + + +class FixedIpNotFoundForInstance(FixedIpNotFound): + message = _("Instance %(instance_id)s has zero fixed ips.") + + +class FixedIpNotFoundForNetworkHost(FixedIpNotFound): + message = _("Network host %(host)s has zero fixed ips " + "in network %(network_id)s.") + + +class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): + message = _("Instance %(instance_id)s doesn't have fixed ip '%(ip)s'.") + + +class FixedIpNotFoundForHost(FixedIpNotFound): + message = _("Host %(host)s has zero fixed ips.") + + +class FixedIpNotFoundForNetwork(FixedIpNotFound): + message = _("Fixed IP address (%(address)s) does not exist in " + "network (%(network_uuid)s).") + + +class FixedIpAlreadyInUse(CinderException): + message = _("Fixed IP address %(address)s is already in use.") + + +class FixedIpInvalid(Invalid): + message = _("Fixed IP address %(address)s is invalid.") + + +class NoMoreFixedIps(CinderException): + message = _("Zero fixed ips available.") + + +class NoFixedIpsDefined(NotFound): + message = _("Zero fixed ips could be found.") + + +class FloatingIpNotFound(NotFound): + message = _("Floating ip not found for id %(id)s.") + + +class FloatingIpDNSExists(Invalid): + message = _("The DNS entry %(name)s already exists in domain %(domain)s.") + + +class FloatingIpNotFoundForAddress(FloatingIpNotFound): + message = _("Floating ip not found for address %(address)s.") + + +class FloatingIpNotFoundForHost(FloatingIpNotFound): + message = _("Floating ip not found for host %(host)s.") + + +class NoMoreFloatingIps(FloatingIpNotFound): + message = _("Zero floating ips available.") + + +class FloatingIpAssociated(CinderException): + message = _("Floating ip %(address)s is associated.") + + +class FloatingIpNotAssociated(CinderException): + message = _("Floating ip %(address)s is not associated.") + + +class NoFloatingIpsDefined(NotFound): + message = _("Zero floating ips exist.") + + +class NoFloatingIpInterface(NotFound): + message = _("Interface %(interface)s not found.") + + +class KeypairNotFound(NotFound): + message = _("Keypair %(name)s not found for user %(user_id)s") + + +class CertificateNotFound(NotFound): + message = _("Certificate %(certificate_id)s not found.") + + +class ServiceNotFound(NotFound): + message = _("Service %(service_id)s could not be found.") + + +class HostNotFound(NotFound): + message = _("Host %(host)s could not be found.") + + +class HostBinaryNotFound(NotFound): + message = _("Could not 
find binary %(binary)s on host %(host)s.") + + +class AuthTokenNotFound(NotFound): + message = _("Auth token %(token)s could not be found.") + + +class AccessKeyNotFound(NotFound): + message = _("Access Key %(access_key)s could not be found.") + + +class QuotaNotFound(NotFound): + message = _("Quota could not be found") + + +class ProjectQuotaNotFound(QuotaNotFound): + message = _("Quota for project %(project_id)s could not be found.") + + +class QuotaClassNotFound(QuotaNotFound): + message = _("Quota class %(class_name)s could not be found.") + + +class SecurityGroupNotFound(NotFound): + message = _("Security group %(security_group_id)s not found.") + + +class SecurityGroupNotFoundForProject(SecurityGroupNotFound): + message = _("Security group %(security_group_id)s not found " + "for project %(project_id)s.") + + +class SecurityGroupNotFoundForRule(SecurityGroupNotFound): + message = _("Security group with rule %(rule_id)s not found.") + + +class SecurityGroupExistsForInstance(Invalid): + message = _("Security group %(security_group_id)s is already associated" + " with the instance %(instance_id)s") + + +class SecurityGroupNotExistsForInstance(Invalid): + message = _("Security group %(security_group_id)s is not associated with" + " the instance %(instance_id)s") + + +class MigrationNotFound(NotFound): + message = _("Migration %(migration_id)s could not be found.") + + +class MigrationNotFoundByStatus(MigrationNotFound): + message = _("Migration not found for instance %(instance_id)s " + "with status %(status)s.") + + +class NoInstanceTypesFound(NotFound): + message = _("Zero instance types found.") + + +class InstanceTypeNotFound(NotFound): + message = _("Instance type %(instance_type_id)s could not be found.") + + +class InstanceTypeNotFoundByName(InstanceTypeNotFound): + message = _("Instance type with name %(instance_type_name)s " + "could not be found.") + + +class FlavorNotFound(NotFound): + message = _("Flavor %(flavor_id)s could not be found.") + + +class CellNotFound(NotFound): + message = _("Cell %(cell_id)s could not be found.") + + +class SchedulerHostFilterNotFound(NotFound): + message = _("Scheduler Host Filter %(filter_name)s could not be found.") + + +class SchedulerCostFunctionNotFound(NotFound): + message = _("Scheduler cost function %(cost_fn_str)s could" + " not be found.") + + +class SchedulerWeightFlagNotFound(NotFound): + message = _("Scheduler weight flag not found: %(flag_name)s") + + +class InstanceMetadataNotFound(NotFound): + message = _("Instance %(instance_id)s has no metadata with " + "key %(metadata_key)s.") + + +class InstanceTypeExtraSpecsNotFound(NotFound): + message = _("Instance Type %(instance_type_id)s has no extra specs with " + "key %(extra_specs_key)s.") + + +class LDAPObjectNotFound(NotFound): + message = _("LDAP object could not be found") + + +class LDAPUserNotFound(LDAPObjectNotFound): + message = _("LDAP user %(user_id)s could not be found.") + + +class LDAPGroupNotFound(LDAPObjectNotFound): + message = _("LDAP group %(group_id)s could not be found.") + + +class LDAPGroupMembershipNotFound(NotFound): + message = _("LDAP user %(user_id)s is not a member of group %(group_id)s.") + + +class FileNotFound(NotFound): + message = _("File %(file_path)s could not be found.") + + +class NoFilesFound(NotFound): + message = _("Zero files could be found.") + + +class SwitchNotFoundForNetworkAdapter(NotFound): + message = _("Virtual switch associated with the " + "network adapter %(adapter)s not found.") + + +class NetworkAdapterNotFound(NotFound): + 
message = _("Network adapter %(adapter)s could not be found.") + + +class ClassNotFound(NotFound): + message = _("Class %(class_name)s could not be found: %(exception)s") + + +class NotAllowed(CinderException): + message = _("Action not allowed.") + + +class GlobalRoleNotAllowed(NotAllowed): + message = _("Unable to use global role %(role_id)s") + + +class ImageRotationNotAllowed(CinderException): + message = _("Rotation is not allowed for snapshots") + + +class RotationRequiredForBackup(CinderException): + message = _("Rotation param is required for backup image_type") + + +#TODO(bcwaldon): EOL this exception! +class Duplicate(CinderException): + pass + + +class KeyPairExists(Duplicate): + message = _("Key pair %(key_name)s already exists.") + + +class UserExists(Duplicate): + message = _("User %(user)s already exists.") + + +class LDAPUserExists(UserExists): + message = _("LDAP user %(user)s already exists.") + + +class LDAPGroupExists(Duplicate): + message = _("LDAP group %(group)s already exists.") + + +class LDAPMembershipExists(Duplicate): + message = _("User %(uid)s is already a member of " + "the group %(group_dn)s") + + +class ProjectExists(Duplicate): + message = _("Project %(project)s already exists.") + + +class InstanceExists(Duplicate): + message = _("Instance %(name)s already exists.") + + +class InstanceTypeExists(Duplicate): + message = _("Instance Type %(name)s already exists.") + + +class VolumeTypeExists(Duplicate): + message = _("Volume Type %(name)s already exists.") + + +class InvalidSharedStorage(CinderException): + message = _("%(path)s is on shared storage: %(reason)s") + + +class MigrationError(CinderException): + message = _("Migration error") + ": %(reason)s" + + +class MalformedRequestBody(CinderException): + message = _("Malformed message body: %(reason)s") + + +class ConfigNotFound(NotFound): + message = _("Could not find config at %(path)s") + + +class PasteAppNotFound(NotFound): + message = _("Could not load paste app '%(name)s' from %(path)s") + + +class CannotResizeToSameSize(CinderException): + message = _("When resizing, instances must change size!") + + +class ImageTooLarge(CinderException): + message = _("Image is larger than instance type allows") + + +class ZoneRequestError(CinderException): + message = _("1 or more Zones could not complete the request") + + +class InstanceTypeMemoryTooSmall(CinderException): + message = _("Instance type's memory is too small for requested image.") + + +class InstanceTypeDiskTooSmall(CinderException): + message = _("Instance type's disk is too small for requested image.") + + +class InsufficientFreeMemory(CinderException): + message = _("Insufficient free memory on compute node to start %(uuid)s.") + + +class CouldNotFetchMetrics(CinderException): + message = _("Could not fetch bandwidth/cpu/disk metrics for this host.") + + +class NoValidHost(CinderException): + message = _("No valid host was found. 
%(reason)s") + + +class WillNotSchedule(CinderException): + message = _("Host %(host)s is not up or doesn't exist.") + + +class QuotaError(CinderException): + message = _("Quota exceeded") + ": code=%(code)s" + + +class AggregateError(CinderException): + message = _("Aggregate %(aggregate_id)s: action '%(action)s' " + "caused an error: %(reason)s.") + + +class AggregateNotFound(NotFound): + message = _("Aggregate %(aggregate_id)s could not be found.") + + +class AggregateNameExists(Duplicate): + message = _("Aggregate %(aggregate_name)s already exists.") + + +class AggregateHostNotFound(NotFound): + message = _("Aggregate %(aggregate_id)s has no host %(host)s.") + + +class AggregateMetadataNotFound(NotFound): + message = _("Aggregate %(aggregate_id)s has no metadata with " + "key %(metadata_key)s.") + + +class AggregateHostConflict(Duplicate): + message = _("Host %(host)s already member of another aggregate.") + + +class AggregateHostExists(Duplicate): + message = _("Aggregate %(aggregate_id)s already has host %(host)s.") + + +class DuplicateSfVolumeNames(Duplicate): + message = _("Detected more than one volume with name %(vol_name)s") + + +class VolumeTypeCreateFailed(CinderException): + message = _("Cannot create volume_type with " + "name %(name)s and specs %(extra_specs)s") + + +class InstanceTypeCreateFailed(CinderException): + message = _("Unable to create instance type") + + +class SolidFireAPIException(CinderException): + message = _("Bad response from SolidFire API") + + +class SolidFireAPIStatusException(SolidFireAPIException): + message = _("Error in SolidFire API response: status=%(status)s") + + +class SolidFireAPIDataException(SolidFireAPIException): + message = _("Error in SolidFire API response: data=%(data)s") + + +class DuplicateVlan(Duplicate): + message = _("Detected existing vlan with id %(vlan)d") + + +class InstanceNotFound(NotFound): + message = _("Instance %(instance_id)s could not be found.") + + +class InvalidInstanceIDMalformed(Invalid): + message = _("Invalid id: %(val)s (expecting \"i-...\").") + + +class CouldNotFetchImage(CinderException): + message = _("Could not fetch image %(image)s") diff --git a/cinder/flags.py b/cinder/flags.py new file mode 100644 index 00000000000..3f1d9f5d889 --- /dev/null +++ b/cinder/flags.py @@ -0,0 +1,356 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Command-line flag library. + +Emulates gflags by wrapping cfg.ConfigOpts. + +The idea is to move fully to cfg eventually, and this wrapper is a +stepping stone. 
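+
+For example, a module would register an option on the global FLAGS object
+and then read it back as an attribute (an illustrative sketch;
+'my_example_opt' is a hypothetical option, not one defined in this module):
+
+    import sys
+
+    from cinder import flags
+    from cinder.openstack.common import cfg
+
+    FLAGS = flags.FLAGS
+    FLAGS.register_opts([cfg.StrOpt('my_example_opt', default='foo',
+                                    help='An illustrative option')])
+    FLAGS(sys.argv)
+    print FLAGS.my_example_opt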
+ +""" + +import os +import socket +import sys + +from cinder.compat import flagfile +from cinder.openstack.common import cfg + + +class CinderConfigOpts(cfg.CommonConfigOpts): + + def __init__(self, *args, **kwargs): + super(CinderConfigOpts, self).__init__(*args, **kwargs) + self.disable_interspersed_args() + + def __call__(self, argv): + with flagfile.handle_flagfiles_managed(argv[1:]) as args: + return argv[:1] + super(CinderConfigOpts, self).__call__(args) + + +FLAGS = CinderConfigOpts() + + +class UnrecognizedFlag(Exception): + pass + + +def DECLARE(name, module_string, flag_values=FLAGS): + if module_string not in sys.modules: + __import__(module_string, globals(), locals()) + if name not in flag_values: + raise UnrecognizedFlag('%s not defined by %s' % (name, module_string)) + + +def _get_my_ip(): + """ + Returns the actual ip of the local machine. + + This code figures out what source address would be used if some traffic + were to be sent out to some well known address on the Internet. In this + case, a Google DNS server is used, but the specific address does not + matter much. No traffic is actually sent. + """ + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.error: + return "127.0.0.1" + + +log_opts = [ + cfg.StrOpt('logdir', + default=None, + help='Log output to a per-service log file in named directory'), + cfg.StrOpt('logfile', + default=None, + help='Log output to a named file'), + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error'), + ] + +core_opts = [ + cfg.StrOpt('connection_type', + default=None, + help='Virtualization api connection type : libvirt, xenapi, ' + 'or fake'), + cfg.StrOpt('sql_connection', + default='sqlite:///$state_path/$sqlite_db', + help='The SQLAlchemy connection string used to connect to the ' + 'database'), + cfg.IntOpt('sql_connection_debug', + default=0, + help='Verbosity of SQL debugging information. 
0=None, ' + '100=Everything'), + cfg.StrOpt('api_paste_config', + default="api-paste.ini", + help='File name for the paste.deploy config for cinder-api'), + cfg.StrOpt('pybasedir', + default=os.path.abspath(os.path.join(os.path.dirname(__file__), + '../')), + help='Directory where the cinder python module is installed'), + cfg.StrOpt('bindir', + default='$pybasedir/bin', + help='Directory where cinder binaries are installed'), + cfg.StrOpt('state_path', + default='$pybasedir', + help="Top-level directory for maintaining cinder's state"), + cfg.StrOpt('lock_path', + default='$pybasedir', + help='Directory to use for lock files'), + ] + +debug_opts = [ + cfg.BoolOpt('fake_rabbit', + default=False, + help='If passed, use a fake RabbitMQ provider'), +] + +FLAGS.register_cli_opts(log_opts) +FLAGS.register_cli_opts(core_opts) +FLAGS.register_cli_opts(debug_opts) + +global_opts = [ + cfg.StrOpt('my_ip', + default=_get_my_ip(), + help='ip address of this host'), + cfg.ListOpt('region_list', + default=[], + help='list of region=fqdn pairs separated by commas'), + cfg.StrOpt('aws_access_key_id', + default='admin', + help='AWS Access ID'), + cfg.StrOpt('aws_secret_access_key', + default='admin', + help='AWS Access Key'), + cfg.StrOpt('glance_host', + default='$my_ip', + help='default glance hostname or ip'), + cfg.IntOpt('glance_port', + default=9292, + help='default glance port'), + cfg.ListOpt('glance_api_servers', + default=['$glance_host:$glance_port'], + help='A list of the glance api servers available to cinder ' + '([hostname|ip]:port)'), + cfg.StrOpt('scheduler_topic', + default='scheduler', + help='the topic scheduler nodes listen on'), + cfg.StrOpt('volume_topic', + default='volume', + help='the topic volume nodes listen on'), + cfg.StrOpt('rabbit_host', + default='localhost', + help='the RabbitMQ host'), + cfg.IntOpt('rabbit_port', + default=5672, + help='the RabbitMQ port'), + cfg.BoolOpt('rabbit_use_ssl', + default=False, + help='connect over SSL for RabbitMQ'), + cfg.StrOpt('rabbit_userid', + default='guest', + help='the RabbitMQ userid'), + cfg.StrOpt('rabbit_password', + default='guest', + help='the RabbitMQ password'), + cfg.StrOpt('rabbit_virtual_host', + default='/', + help='the RabbitMQ virtual host'), + cfg.IntOpt('rabbit_retry_interval', + default=1, + help='how frequently to retry connecting with RabbitMQ'), + cfg.IntOpt('rabbit_retry_backoff', + default=2, + help='how long to backoff for between retries when connecting ' + 'to RabbitMQ'), + cfg.IntOpt('rabbit_max_retries', + default=0, + help='maximum retries with trying to connect to RabbitMQ ' + '(the default of 0 implies an infinite retry count)'), + cfg.StrOpt('control_exchange', + default='cinder', + help='the main RabbitMQ exchange to connect to'), + cfg.BoolOpt('rabbit_durable_queues', + default=False, + help='use durable queues in RabbitMQ'), + cfg.BoolOpt('api_rate_limit', + default=True, + help='whether to rate limit the api'), + cfg.ListOpt('enabled_apis', + default=['osapi_volume'], + help='a list of APIs to enable by default'), + cfg.ListOpt('osapi_volume_ext_list', + default=[], + help='Specify list of extensions to load when using osapi_' + 'volume_extension option with cinder.api.openstack.' 
+ 'volume.contrib.select_extensions'), + cfg.MultiStrOpt('osapi_volume_extension', + default=[ + 'cinder.api.openstack.volume.contrib.standard_extensions' + ], + help='osapi volume extension to load'), + cfg.StrOpt('osapi_scheme', + default='http', + help='the protocol to use when connecting to the openstack api ' + 'server (http, https)'), + cfg.StrOpt('osapi_path', + default='/v1.1/', + help='the path prefix used to call the openstack api server'), + cfg.StrOpt('osapi_compute_link_prefix', + default=None, + help='Base URL that will be presented to users in links ' + 'to the OpenStack Compute API'), + cfg.IntOpt('osapi_max_limit', + default=1000, + help='the maximum number of items returned in a single ' + 'response from a collection resource'), + cfg.StrOpt('metadata_host', + default='$my_ip', + help='the ip for the metadata api server'), + cfg.IntOpt('metadata_port', + default=8775, + help='the port for the metadata api port'), + cfg.StrOpt('default_project', + default='openstack', + help='the default project to use for openstack'), + cfg.StrOpt('default_image', + default='ami-11111', + help='default image to use, testing only'), + cfg.StrOpt('default_instance_type', + default='m1.small', + help='default instance type to use, testing only'), + cfg.StrOpt('null_kernel', + default='nokernel', + help='kernel image that indicates not to use a kernel, but to ' + 'use a raw disk image instead'), + cfg.StrOpt('vpn_image_id', + default='0', + help='image id used when starting up a cloudpipe vpn server'), + cfg.StrOpt('vpn_key_suffix', + default='-vpn', + help='Suffix to add to project name for vpn key and secgroups'), + cfg.IntOpt('auth_token_ttl', + default=3600, + help='Seconds for auth tokens to linger'), + cfg.StrOpt('logfile_mode', + default='0644', + help='Default file mode used when creating log files'), + cfg.StrOpt('sqlite_db', + default='cinder.sqlite', + help='the filename to use with sqlite'), + cfg.BoolOpt('sqlite_synchronous', + default=True, + help='If passed, use synchronous mode for sqlite'), + cfg.IntOpt('sql_idle_timeout', + default=3600, + help='timeout before idle sql connections are reaped'), + cfg.IntOpt('sql_max_retries', + default=10, + help='maximum db connection retries during startup. ' + '(setting -1 implies an infinite retry count)'), + cfg.IntOpt('sql_retry_interval', + default=10, + help='interval between retries of opening a sql connection'), + cfg.StrOpt('volume_manager', + default='cinder.volume.manager.VolumeManager', + help='full class name for the Manager for volume'), + cfg.StrOpt('scheduler_manager', + default='cinder.scheduler.manager.SchedulerManager', + help='full class name for the Manager for scheduler'), + cfg.StrOpt('host', + default=socket.gethostname(), + help='Name of this node. This can be an opaque identifier. ' + 'It is not necessarily a hostname, FQDN, or IP address.'), + cfg.StrOpt('node_availability_zone', + default='cinder', + help='availability zone of this node'), + cfg.StrOpt('notification_driver', + default='cinder.notifier.no_op_notifier', + help='Default driver for sending notifications'), + cfg.ListOpt('memcached_servers', + default=None, + help='Memcached servers or None for in process cache.'), + cfg.StrOpt('instance_usage_audit_period', + default='month', + help='time period to generate instance usages for. 
'
+               'Time period must be hour, day, month or year'),
+    cfg.IntOpt('bandwidth_poll_interval',
+               default=600,
+               help='interval to pull bandwidth usage info'),
+    cfg.BoolOpt('start_guests_on_host_boot',
+                default=False,
+                help='Whether to restart guests when the host reboots'),
+    cfg.BoolOpt('resume_guests_state_on_host_boot',
+                default=False,
+                help='Whether to start guests that were running before the '
+                     'host rebooted'),
+    cfg.StrOpt('default_ephemeral_format',
+               default=None,
+               help='The default format an ephemeral_volume will be '
+                    'formatted with on creation.'),
+    cfg.StrOpt('root_helper',
+               default='sudo',
+               help='Command prefix to use for running commands as root'),
+    cfg.BoolOpt('use_ipv6',
+                default=False,
+                help='use ipv6'),
+    cfg.BoolOpt('monkey_patch',
+                default=False,
+                help='Whether to log monkey patching'),
+    cfg.ListOpt('monkey_patch_modules',
+                default=[],
+                help='List of modules/decorators to monkey patch'),
+    cfg.BoolOpt('allow_resize_to_same_host',
+                default=False,
+                help='Allow destination machine to match source for resize. '
+                     'Useful when testing in single-host environments.'),
+    cfg.IntOpt('reclaim_instance_interval',
+               default=0,
+               help='Interval in seconds for reclaiming deleted instances'),
+    cfg.IntOpt('zombie_instance_updated_at_window',
+               default=172800,
+               help='Number of seconds after which zombie instances are '
+                    'cleaned up.'),
+    cfg.IntOpt('service_down_time',
+               default=60,
+               help='maximum time since last check-in for up service'),
+    cfg.StrOpt('default_schedule_zone',
+               default=None,
+               help='availability zone to use when user doesn\'t specify one'),
+    cfg.ListOpt('isolated_images',
+                default=[],
+                help='Images to run on isolated host'),
+    cfg.ListOpt('isolated_hosts',
+                default=[],
+                help='Host reserved for specific images'),
+    cfg.StrOpt('volume_api_class',
+               default='cinder.volume.api.API',
+               help='The full class name of the volume API class to use'),
+    cfg.StrOpt('auth_strategy',
+               default='noauth',
+               help='The strategy to use for auth. Supports noauth, keystone, '
+                    'and deprecated.'),
+]
+
+FLAGS.register_opts(global_opts)
diff --git a/cinder/locale/bs/LC_MESSAGES/nova.po b/cinder/locale/bs/LC_MESSAGES/nova.po
new file mode 100644
index 00000000000..f2171c73d93
--- /dev/null
+++ b/cinder/locale/bs/LC_MESSAGES/nova.po
@@ -0,0 +1,8201 @@
+# Bosnian translation for cinder
+# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011
+# This file is distributed under the same license as the cinder package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
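+#
+# Each entry below pairs a msgid (the English string extracted from the
+# source location named in the preceding comment) with a msgstr holding
+# its Bosnian translation; an empty msgstr "" marks a string that has not
+# been translated yet.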
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-01-19 20:22+0000\n" +"Last-Translator: yazar \n" +"Language-Team: Bosnian \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Neočekivana greška prilikom pokretanja komande." + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." 
+msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +msgid "Failed to terminate instance" +msgstr "" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." 
+msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." 
+msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." 
+msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." 
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be 
specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, python-format +msgid "Invalid mode: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance 
%s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +msgid "Going to try to soft delete instance" +msgstr "" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +msgid "Going to try to terminate instance" +msgstr "" + +#: cinder/compute/api.py:977 +msgid "Going to try to stop instance" +msgstr "" + +#: cinder/compute/api.py:996 +msgid "Going to try to start instance" +msgstr "" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." 
+msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." 
+msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: cinder/network/linux_net.py:1142 +#, python-format +msgid "Starting bridge %s " +msgstr "" + +#: cinder/network/linux_net.py:1149 +#, python-format +msgid "Done starting bridge %s" +msgstr "" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: cinder/virt/connection.py:85 +msgid "Failed to 
open connection to the hypervisor" +msgstr "" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +msgid "Instance soft rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +msgid "Guest does not have a console available" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of 
%(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +msgid "Starting instance" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Status volumena mora biti omogućen" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: 
cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/netapp.py:614 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/netapp.py:620 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/netapp.py:625 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." 
+msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." 
+msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." 
+#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Reconnected to queue" +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "response %s" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "" + +#~ msgid "message %s" +#~ msgstr "" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "" + +#~ msgid "Declaring queue %s" +#~ msgstr "" + +#~ msgid "Declaring exchange %s" +#~ msgstr "" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "" + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgstr "" + +#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgstr "" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/cs/LC_MESSAGES/nova.po b/cinder/locale/cs/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..89cc857839c --- /dev/null +++ b/cinder/locale/cs/LC_MESSAGES/nova.po @@ -0,0 +1,8251 @@ +# Czech translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-04-04 20:28+0000\n" +"Last-Translator: Zbyněk Schwarz \n" +"Language-Team: Czech \n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "Jméno souboru kořenové CA" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Jméno souboru se soukromým klíčem" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "Název souboru seznamu zrušení kořenového certifikátu" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "Kde uchováváme naše klíče" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "Kde uchováváme naši kořenovou CA" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "Použijeme CA pro každý projekt?" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" +"Předmět certifikátu pro uživatele, %s pro projekt, uživatel, časové " +"razítko" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Předmět certifikátu pro projekty, %s pro projekt, časové razítko" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "Cesta příznaků: %s" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Při spuštění příkazu došlo k nečekané chybě." + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Příkaz: %(cmd)s\n" +"Kód ukončení: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "Výjimka DB zabalena." + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "Vyskytla se neočekávaná výjimka." + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "Nelze dešifrovat text" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "Nelze stránkovat skrze obrázky ze služby obrázků" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "Vytvoření virtuálního rozhraní selhalo" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interface with unique mac address failed" +msgstr "Selhalo 5 pokusů o vytvoření virtuálního rozhraní s jedinečnou mac adresou" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "Připojení k glance selhalo" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "Připojení k melange selhalo" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "Neschváleno." 
+ +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "Uživatel nemá správcovská oprávnění" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Pravidla nedovolují, aby bylo %(action)s provedeno." + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "Kernel nenalezen v obrazu %(image_id)s." + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "Nepřijatelné parametry." + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "Neplatný snímek" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "Svazek %(volume_id)s není k ničemu připojen" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "Data páru klíčů jsou neplatná" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "Nelze načíst data do formátu json" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "Požadavek je neplatný." + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "Neplatný podpis %(signature)s pro uživatele %(user)s." + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "Obdržen neplatný vstup" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "Neplatná instance typu %(instance_type)s." + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "Neplatný typ svazku" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "Neplatný svazek" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "Neplatný rozsah portů %(from_port)s:%(to_port)s. %(msg)s" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "Neplatný protokol IP %(protocol)s." + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Neplatný typ obsahu %(content_type)s." + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "Neplatný cidr %(cidr)s." + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "Neplatné znovu použití připojení RPC." + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" +"Nelze provést činnost '%(action)s' na agregátu %(aggregate_id)s. Důvod: " +"%(reason)s." + +#: cinder/exception.py:301 +#, fuzzy, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "Nebyl nalezen žádný platný hostitel. %(reason)s" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" +"Instance %(instance_uuid)s v %(attr)s %(state)s. Nelze %(method)s " +"zatímco je instance v tomto stavu." + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "Instance %(instance_id)s není spuštěna." 
+
+#: cinder/exception.py:314
+#, python-format
+msgid "Instance %(instance_id)s is not suspended."
+msgstr "Instance %(instance_id)s není pozastavena."
+
+#: cinder/exception.py:318
+#, python-format
+msgid "Instance %(instance_id)s is not in rescue mode"
+msgstr "Instance %(instance_id)s není v nouzovém režimu"
+
+#: cinder/exception.py:322
+msgid "Failed to suspend instance"
+msgstr "Nelze pozastavit instanci"
+
+#: cinder/exception.py:326
+msgid "Failed to resume server"
+msgstr "Nelze obnovit chod serveru"
+
+#: cinder/exception.py:330
+msgid "Failed to reboot instance"
+msgstr "Nelze restartovat instanci"
+
+#: cinder/exception.py:334
+msgid "Failed to terminate instance"
+msgstr "Nelze ukončit instanci"
+
+#: cinder/exception.py:338
+msgid "Service is unavailable at this time."
+msgstr "Služba je v tuto chvíli nedostupná."
+
+#: cinder/exception.py:342
+msgid "Volume service is unavailable at this time."
+msgstr "Služba svazku je v tuto chvíli nedostupná."
+
+#: cinder/exception.py:346
+msgid "Compute service is unavailable at this time."
+msgstr "Služba výpočtu je v tuto chvíli nedostupná."
+
+#: cinder/exception.py:350
+#, python-format
+msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
+msgstr ""
+"Nelze přesunout instanci (%(instance_id)s) na současného hostitele "
+"(%(host)s)."
+
+#: cinder/exception.py:355
+msgid "Destination compute host is unavailable at this time."
+msgstr "Cílový výpočetní hostitel je v současnosti nedostupný."
+
+#: cinder/exception.py:359
+msgid "Original compute host is unavailable at this time."
+msgstr "Původní výpočetní hostitel je v současnosti nedostupný."
+
+#: cinder/exception.py:363
+msgid "The supplied hypervisor type of is invalid."
+msgstr "Zadaný typ hypervizoru je neplatný."
+
+#: cinder/exception.py:367
+msgid "The instance requires a newer hypervisor version than has been provided."
+msgstr "Instance vyžaduje novější verzi hypervizoru, než byla poskytnuta."
+
+#: cinder/exception.py:372
+#, python-format
+msgid ""
+"The supplied disk path (%(path)s) already exists, it is expected not to "
+"exist."
+msgstr "Zadaná cesta disku (%(path)s) již existuje, očekává se, že nebude."
+
+#: cinder/exception.py:377
+#, python-format
+msgid "The supplied device path (%(path)s) is invalid."
+msgstr "Zadaná cesta zařízení (%(path)s) je neplatná."
+
+#: cinder/exception.py:381
+#, python-format
+msgid "The supplied device (%(device)s) is busy."
+msgstr "Zadané zařízení (%(device)s) je zaneprázdněné."
+
+#: cinder/exception.py:385
+msgid "Unacceptable CPU info"
+msgstr "Nepřijatelné informace o procesoru"
+
+#: cinder/exception.py:389
+#, python-format
+msgid "%(address)s is not a valid IP v4/6 address."
+msgstr "%(address)s není platná IP adresa v4/6."
+
+#: cinder/exception.py:393
+#, python-format
+msgid ""
+"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
+"tag is %(tag)s, but the one associated with the port group is %(pgroup)s."
+msgstr ""
+"Značka VLAN není vhodná pro skupinu portů %(bridge)s. Očekávaná značka "
+"VLAN je %(tag)s, ale značka připojená ke skupině portů je %(pgroup)s."
+
+#: cinder/exception.py:399
+#, python-format
+msgid ""
+"vSwitch which contains the port group %(bridge)s is not associated with "
+"the desired physical adapter. Expected vSwitch is %(expected)s, but the "
+"one associated is %(actual)s."
+msgstr ""
+"vSwitch, který obsahuje skupinu portů %(bridge)s, není spojen s "
+"požadovaným fyzickým adaptérem. Očekávaný vSwitch je %(expected)s, ale "
+"spojený je %(actual)s."
+
+#: cinder/exception.py:406
+#, python-format
+msgid "Disk format %(disk_format)s is not acceptable"
+msgstr "Formát disku %(disk_format)s není přijatelný"
+
+#: cinder/exception.py:410
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s"
+
+#: cinder/exception.py:414
+#, python-format
+msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
+msgstr "Instance %(instance_id)s je nepřijatelná: %(reason)s"
+
+#: cinder/exception.py:418
+#, python-format
+msgid "Ec2 id %(ec2_id)s is unacceptable."
+msgstr "Id Ec2 %(ec2_id)s je nepřijatelné."
+
+#: cinder/exception.py:422
+msgid "Resource could not be found."
+msgstr "Zdroj nemohl být nalezen."
+
+#: cinder/exception.py:427
+#, python-format
+msgid "Required flag %(flag)s not set."
+msgstr "Požadovaný příznak %(flag)s není nastaven."
+
+#: cinder/exception.py:431
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr "Svazek %(volume_id)s nemohl být nalezen."
+
+#: cinder/exception.py:435
+#, python-format
+msgid "Unable to locate account %(account_name)s on Solidfire device"
+msgstr "Nelze nalézt účet %(account_name)s na zařízení Solidfire"
+
+#: cinder/exception.py:440
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr "Svazek pro instanci %(instance_id)s nenalezen."
+
+#: cinder/exception.py:444
+#, python-format
+msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+msgstr "Svazek %(volume_id)s nemá žádná metadata s klíčem %(metadata_key)s."
+
+#: cinder/exception.py:449
+msgid "Zero volume types found."
+msgstr "Nalezeno nula typů svazku."
+
+#: cinder/exception.py:453
+#, python-format
+msgid "Volume type %(volume_type_id)s could not be found."
+msgstr "Typ svazku %(volume_type_id)s nemohl být nalezen."
+
+#: cinder/exception.py:457
+#, python-format
+msgid "Volume type with name %(volume_type_name)s could not be found."
+msgstr "Typ svazku s názvem %(volume_type_name)s nemohl být nalezen."
+
+#: cinder/exception.py:462
+#, python-format
+msgid ""
+"Volume Type %(volume_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr ""
+"Typ svazku %(volume_type_id)s nemá žádné dodatečné parametry s klíčem "
+"%(extra_specs_key)s."
+
+#: cinder/exception.py:467
+#, python-format
+msgid "Snapshot %(snapshot_id)s could not be found."
+msgstr "Snímek %(snapshot_id)s nemohl být nalezen."
+
+#: cinder/exception.py:471
+#, python-format
+msgid "deleting volume %(volume_name)s that has snapshot"
+msgstr "mazání svazku %(volume_name)s, který má snímek"
+
+#: cinder/exception.py:475
+#, python-format
+msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
+msgstr ""
+
+#: cinder/exception.py:480
+#, python-format
+msgid "No target id found for volume %(volume_id)s."
+msgstr "Nenalezeno žádné cílové id pro svazek %(volume_id)s."
+
+#: cinder/exception.py:484
+#, python-format
+msgid "No disk at %(location)s"
+msgstr "Žádný disk ve %(location)s"
+
+#: cinder/exception.py:488
+#, python-format
+msgid "Could not find a handler for %(driver_type)s volume."
+msgstr "Nelze najít obslužnou rutinu pro svazek %(driver_type)s."
+
+#: cinder/exception.py:492
+#, python-format
+msgid "Invalid image href %(image_href)s."
+msgstr "Neplatný href %(image_href)s obrazu."
+
+#: cinder/exception.py:496
+msgid ""
+"Some images have been stored via hrefs. This version of the api does not "
+"support displaying image hrefs."
+msgstr ""
+"Některé obrazy byly uloženy pomocí href. Tato verze api nepodporuje "
+"zobrazování href obrazů."
+
+#: cinder/exception.py:501
+#, python-format
+msgid "Image %(image_id)s could not be found."
+msgstr "Obraz %(image_id)s nemohl být nalezen."
+
+#: cinder/exception.py:505
+#, python-format
+msgid "Kernel not found for image %(image_id)s."
+msgstr "Kernel nenalezen v obrazu %(image_id)s."
+
+#: cinder/exception.py:509
+#, python-format
+msgid "User %(user_id)s could not be found."
+msgstr "Uživatel %(user_id)s nemohl být nalezen."
+
+#: cinder/exception.py:513
+#, python-format
+msgid "Project %(project_id)s could not be found."
+msgstr "Projekt %(project_id)s nemohl být nalezen."
+
+#: cinder/exception.py:517
+#, python-format
+msgid "User %(user_id)s is not a member of project %(project_id)s."
+msgstr "Uživatel %(user_id)s není členem projektu %(project_id)s."
+
+#: cinder/exception.py:521
+#, python-format
+msgid "Role %(role_id)s could not be found."
+msgstr "Role %(role_id)s nemohla být nalezena."
+
+#: cinder/exception.py:525
+msgid "Cannot find SR to read/write VDI."
+msgstr "Nelze najít SR pro čtení/zápis VDI."
+
+#: cinder/exception.py:529
+#, python-format
+msgid "%(req)s is required to create a network."
+msgstr "%(req)s je vyžadováno pro vytvoření sítě."
+
+#: cinder/exception.py:533
+#, python-format
+msgid "Network %(network_id)s could not be found."
+msgstr "Síť %(network_id)s nemohla být nalezena."
+
+#: cinder/exception.py:537
+#, python-format
+msgid "Network could not be found for bridge %(bridge)s"
+msgstr "Síť nemohla být pro most %(bridge)s nalezena"
+
+#: cinder/exception.py:541
+#, python-format
+msgid "Network could not be found for uuid %(uuid)s"
+msgstr "Síť nemohla být pro uuid %(uuid)s nalezena"
+
+#: cinder/exception.py:545
+#, python-format
+msgid "Network could not be found with cidr %(cidr)s."
+msgstr "Síť nemohla být pro cidr %(cidr)s nalezena."
+
+#: cinder/exception.py:549
+#, python-format
+msgid "Network could not be found for instance %(instance_id)s."
+msgstr "Síť nemohla být pro instanci %(instance_id)s nalezena."
+
+#: cinder/exception.py:553
+msgid "No networks defined."
+msgstr "Žádné sítě nejsou určeny."
+
+#: cinder/exception.py:557
+#, python-format
+msgid ""
+"Either Network uuid %(network_uuid)s is not present or is not assigned to"
+" the project %(project_id)s."
+msgstr ""
+"Buď síť uuid %(network_uuid)s není přítomna, nebo není přidělena "
+"projektu %(project_id)s."
+
+#: cinder/exception.py:562
+#, python-format
+msgid "Host is not set to the network (%(network_id)s)."
+msgstr "Hostitel není v síti (%(network_id)s) nastaven."
+
+#: cinder/exception.py:566
+#, python-format
+msgid "Network %(network)s has active ports, cannot delete."
+msgstr ""
+
+#: cinder/exception.py:570
+msgid "Could not find the datastore reference(s) which the VM uses."
+msgstr "Nelze najít odkazy datového úložiště, které VM používá."
+
+#: cinder/exception.py:574
+#, python-format
+msgid "No fixed IP associated with id %(id)s."
+msgstr "Žádná pevná IP není spojena s %(id)s."
+
+#: cinder/exception.py:578
+#, python-format
+msgid "Fixed ip not found for address %(address)s."
+msgstr "Pevná ip není pro adresu %(address)s nalezena."
+
+#: cinder/exception.py:582
+#, python-format
+msgid "Instance %(instance_id)s has zero fixed ips."
+msgstr "Instance %(instance_id)s má nula pevných ip."
+
+#: cinder/exception.py:586
+#, python-format
+msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
+msgstr "Síťový hostitel %(host)s má nula pevných ip v síti %(network_id)s."
+
+#: cinder/exception.py:591
+#, python-format
+msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'."
+msgstr "Instance %(instance_id)s nemá pevnou ip '%(ip)s'."
+
+#: cinder/exception.py:595
+#, python-format
+msgid "Host %(host)s has zero fixed ips."
+msgstr "Hostitel %(host)s má nula pevných ip."
+
+#: cinder/exception.py:599
+#, python-format
+msgid ""
+"Fixed IP address (%(address)s) does not exist in network "
+"(%(network_uuid)s)."
+msgstr "Pevná IP adresa (%(address)s) v síti (%(network_uuid)s) neexistuje."
+
+#: cinder/exception.py:604
+#, python-format
+msgid "Fixed IP address %(address)s is already in use."
+msgstr "Pevná IP adresa %(address)s je již používána."
+
+#: cinder/exception.py:608
+#, python-format
+msgid "Fixed IP address %(address)s is invalid."
+msgstr "Pevná IP adresa %(address)s je neplatná."
+
+#: cinder/exception.py:612
+msgid "Zero fixed ips available."
+msgstr "Je dostupných nula pevných ip."
+
+#: cinder/exception.py:616
+msgid "Zero fixed ips could be found."
+msgstr "Bylo nalezeno nula pevných ip."
+
+#: cinder/exception.py:620
+#, python-format
+msgid "Floating ip not found for id %(id)s."
+msgstr "Plovoucí ip není nalezena pro id %(id)s."
+
+#: cinder/exception.py:624
+#, python-format
+msgid "The DNS entry %(name)s already exists in domain %(domain)s."
+msgstr "Záznam DNS %(name)s již v doméně %(domain)s existuje."
+
+#: cinder/exception.py:628
+#, python-format
+msgid "Floating ip not found for address %(address)s."
+msgstr "Plovoucí ip nenalezena pro adresu %(address)s."
+
+#: cinder/exception.py:632
+#, python-format
+msgid "Floating ip not found for host %(host)s."
+msgstr "Plovoucí ip nenalezena pro hostitele %(host)s."
+
+#: cinder/exception.py:636
+msgid "Zero floating ips available."
+msgstr "Je dostupných nula plovoucích ip."
+
+#: cinder/exception.py:640
+#, python-format
+msgid "Floating ip %(address)s is associated."
+msgstr "Plovoucí ip %(address)s je přidružena."
+
+#: cinder/exception.py:644
+#, python-format
+msgid "Floating ip %(address)s is not associated."
+msgstr "Plovoucí ip %(address)s není přidružena."
+
+#: cinder/exception.py:648
+msgid "Zero floating ips exist."
+msgstr "Existuje nula plovoucích ip."
+
+#: cinder/exception.py:652
+#, python-format
+msgid "Interface %(interface)s not found."
+msgstr "Rozhraní %(interface)s nenalezeno."
+
+#: cinder/exception.py:656
+#, python-format
+msgid "Keypair %(name)s not found for user %(user_id)s"
+msgstr "Dvojice klíčů %(name)s nenalezena pro uživatele %(user_id)s"
+
+#: cinder/exception.py:660
+#, python-format
+msgid "Certificate %(certificate_id)s not found."
+msgstr "Certifikát %(certificate_id)s nenalezen."
+
+#: cinder/exception.py:664
+#, python-format
+msgid "Service %(service_id)s could not be found."
+msgstr "Služba %(service_id)s nemohla být nalezena."
+
+#: cinder/exception.py:668
+#, python-format
+msgid "Host %(host)s could not be found."
+msgstr "Hostitel %(host)s nemohl být nalezen."
+
+#: cinder/exception.py:672
+#, python-format
+msgid "Compute host %(host)s could not be found."
+msgstr "Výpočetní hostitel %(host)s nemohl být nalezen."
+
+#: cinder/exception.py:676
+#, python-format
+msgid "Could not find binary %(binary)s on host %(host)s."
+msgstr "Nelze najít binární soubor %(binary)s na hostiteli %(host)s."
+
+#: cinder/exception.py:680
+#, python-format
+msgid "Auth token %(token)s could not be found."
+msgstr "Známka oprávnění %(token)s nemohla být nalezena."
+
+#: cinder/exception.py:684
+#, python-format
+msgid "Access Key %(access_key)s could not be found."
+msgstr "Přístupový klíč %(access_key)s nemohl být nalezen."
+
+#: cinder/exception.py:688
+msgid "Quota could not be found"
+msgstr "Kvóta nemohla být nalezena"
+
+#: cinder/exception.py:692
+#, python-format
+msgid "Quota for project %(project_id)s could not be found."
+msgstr "Kvóta pro projekt %(project_id)s nemohla být nalezena."
+
+#: cinder/exception.py:696
+#, python-format
+msgid "Quota class %(class_name)s could not be found."
+msgstr "Třída kvóty %(class_name)s nemohla být nalezena."
+
+#: cinder/exception.py:700
+#, python-format
+msgid "Security group %(security_group_id)s not found."
+msgstr "Bezpečnostní skupina %(security_group_id)s není nalezena."
+
+#: cinder/exception.py:704
+#, python-format
+msgid "Security group %(security_group_id)s not found for project %(project_id)s."
+msgstr ""
+"Bezpečnostní skupina %(security_group_id)s není nalezena v projektu "
+"%(project_id)s."
+
+#: cinder/exception.py:709
+#, python-format
+msgid "Security group with rule %(rule_id)s not found."
+msgstr "Bezpečnostní skupina s pravidlem %(rule_id)s nenalezena."
+
+#: cinder/exception.py:713
+#, python-format
+msgid ""
+"Security group %(security_group_id)s is already associated with the "
+"instance %(instance_id)s"
+msgstr ""
+"Bezpečnostní skupina %(security_group_id)s je již přidružena k instanci "
+"%(instance_id)s"
+
+#: cinder/exception.py:718
+#, python-format
+msgid ""
+"Security group %(security_group_id)s is not associated with the instance "
+"%(instance_id)s"
+msgstr ""
+"Bezpečnostní skupina %(security_group_id)s není přidružena k instanci "
+"%(instance_id)s"
+
+#: cinder/exception.py:723
+#, python-format
+msgid "Migration %(migration_id)s could not be found."
+msgstr "Přesun %(migration_id)s nemohl být nalezen."
+
+#: cinder/exception.py:727
+#, python-format
+msgid "Migration not found for instance %(instance_id)s with status %(status)s."
+msgstr "Přesun nenalezen pro instanci %(instance_id)s se stavem %(status)s."
+
+#: cinder/exception.py:732
+#, python-format
+msgid "Console pool %(pool_id)s could not be found."
+msgstr "Zásoba konzole %(pool_id)s nemohla být nalezena."
+
+#: cinder/exception.py:736
+#, python-format
+msgid ""
+"Console pool of type %(console_type)s for compute host %(compute_host)s "
+"on proxy host %(host)s not found."
+msgstr ""
+"Zásoba konzole typu %(console_type)s pro výpočetního hostitele "
+"%(compute_host)s na hostiteli proxy %(host)s nemohla být nalezena."
+
+#: cinder/exception.py:742
+#, python-format
+msgid "Console %(console_id)s could not be found."
+msgstr "Konzole %(console_id)s nemohla být nalezena."
+
+#: cinder/exception.py:746
+#, python-format
+msgid "Console for instance %(instance_id)s could not be found."
+msgstr "Konzole pro instanci %(instance_id)s nemohla být nalezena."
+
+#: cinder/exception.py:750
+#, python-format
+msgid ""
+"Console for instance %(instance_id)s in pool %(pool_id)s could not be "
+"found."
+msgstr ""
+"Konzole pro instanci %(instance_id)s v zásobě %(pool_id)s nemohla být "
+"nalezena."
+
+#: cinder/exception.py:755
+#, python-format
+msgid "Invalid console type %(console_type)s "
+msgstr "Neplatný typ konzole %(console_type)s "
+
+#: cinder/exception.py:759
+msgid "Zero instance types found."
+msgstr "Nalezeno nula typů instancí."
+
+#: cinder/exception.py:763
+#, python-format
+msgid "Instance type %(instance_type_id)s could not be found."
+msgstr "Typ instance %(instance_type_id)s nemohl být nalezen."
+
+#: cinder/exception.py:767
+#, python-format
+msgid "Instance type with name %(instance_type_name)s could not be found."
+msgstr "Typ instance s názvem %(instance_type_name)s nemohl být nalezen."
+
+#: cinder/exception.py:772
+#, python-format
+msgid "Flavor %(flavor_id)s could not be found."
+msgstr "Konfigurace %(flavor_id)s nemohla být nalezena."
+
+#: cinder/exception.py:776
+#, python-format
+msgid "Cell %(cell_id)s could not be found."
+msgstr "Buňka %(cell_id)s nemohla být nalezena."
+
+#: cinder/exception.py:780
+#, python-format
+msgid "Scheduler Host Filter %(filter_name)s could not be found."
+msgstr "Filtr hostitelů plánovače %(filter_name)s nemohl být nalezen."
+
+#: cinder/exception.py:784
+#, python-format
+msgid "Scheduler cost function %(cost_fn_str)s could not be found."
+msgstr "Funkce nákladů plánovače %(cost_fn_str)s nemohla být nalezena."
+
+#: cinder/exception.py:789
+#, python-format
+msgid "Scheduler weight flag not found: %(flag_name)s"
+msgstr "Příznak váhy plánovače nemohl být nalezen: %(flag_name)s"
+
+#: cinder/exception.py:793
+#, python-format
+msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s."
+msgstr "Instance %(instance_id)s nemá žádná metadata s klíčem %(metadata_key)s."
+
+#: cinder/exception.py:798
+#, python-format
+msgid ""
+"Instance Type %(instance_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr ""
+"Typ instance %(instance_type_id)s nemá žádné dodatečné specifikace s "
+"klíčem %(extra_specs_key)s."
+
+#: cinder/exception.py:803
+msgid "LDAP object could not be found"
+msgstr "Objekt LDAP nemohl být nalezen"
+
+#: cinder/exception.py:807
+#, python-format
+msgid "LDAP user %(user_id)s could not be found."
+msgstr "Uživatel LDAP %(user_id)s nemohl být nalezen."
+
+#: cinder/exception.py:811
+#, python-format
+msgid "LDAP group %(group_id)s could not be found."
+msgstr "Skupina LDAP %(group_id)s nemohla být nalezena."
+
+#: cinder/exception.py:815
+#, python-format
+msgid "LDAP user %(user_id)s is not a member of group %(group_id)s."
+msgstr "Uživatel LDAP %(user_id)s není členem skupiny %(group_id)s."
+
+#: cinder/exception.py:819
+#, python-format
+msgid "File %(file_path)s could not be found."
+msgstr "Soubor %(file_path)s nemohl být nalezen."
+
+#: cinder/exception.py:823
+msgid "Zero files could be found."
+msgstr "Nalezeno nula souborů."
+
+#: cinder/exception.py:827
+#, python-format
+msgid "Virtual switch associated with the network adapter %(adapter)s not found."
+msgstr "Nenalezen virtuální přepínač sdružený se síťovým adaptérem %(adapter)s."
+
+#: cinder/exception.py:832
+#, python-format
+msgid "Network adapter %(adapter)s could not be found."
+msgstr "Síťový adaptér %(adapter)s nemohl být nalezen."
+
+#: cinder/exception.py:836
+#, python-format
+msgid "Class %(class_name)s could not be found: %(exception)s"
+msgstr "Třída %(class_name)s nemohla být nalezena: %(exception)s"
+
+#: cinder/exception.py:840
+msgid "Action not allowed."
+msgstr "Činnost není povolena."
+
+#: cinder/exception.py:844
+#, python-format
+msgid "Unable to use global role %(role_id)s"
+msgstr "Nelze použít globální roli %(role_id)s"
+
+#: cinder/exception.py:848
+msgid "Rotation is not allowed for snapshots"
+msgstr "Střídání není povoleno pro snímky"
+
+#: cinder/exception.py:852
+msgid "Rotation param is required for backup image_type"
+msgstr "Parametr rotation je vyžadován pro backup image_type"
+
+#: cinder/exception.py:861
+#, python-format
+msgid "Key pair %(key_name)s already exists."
+msgstr "Dvojice klíčů %(key_name)s již existuje."
+
+#: cinder/exception.py:865
+#, python-format
+msgid "User %(user)s already exists."
+msgstr "Uživatel %(user)s již existuje."
+
+#: cinder/exception.py:869
+#, python-format
+msgid "LDAP user %(user)s already exists."
+msgstr "Uživatel LDAP %(user)s již existuje."
+
+#: cinder/exception.py:873
+#, python-format
+msgid "LDAP group %(group)s already exists."
+msgstr "Skupina LDAP %(group)s již existuje."
+
+#: cinder/exception.py:877
+#, python-format
+msgid "User %(uid)s is already a member of the group %(group_dn)s"
+msgstr "Uživatel %(uid)s již je členem skupiny %(group_dn)s"
+
+#: cinder/exception.py:882
+#, python-format
+msgid "Project %(project)s already exists."
+msgstr "Projekt %(project)s již existuje."
+
+#: cinder/exception.py:886
+#, python-format
+msgid "Instance %(name)s already exists."
+msgstr "Instance %(name)s již existuje."
+
+#: cinder/exception.py:890
+#, python-format
+msgid "Instance Type %(name)s already exists."
+msgstr "Typ instance %(name)s již existuje."
+
+#: cinder/exception.py:894
+#, python-format
+msgid "Volume Type %(name)s already exists."
+msgstr "Typ svazku %(name)s již existuje."
+
+#: cinder/exception.py:898
+#, python-format
+msgid "%(path)s is on shared storage: %(reason)s"
+msgstr "%(path)s je ve sdíleném úložišti: %(reason)s"
+
+#: cinder/exception.py:902
+msgid "Migration error"
+msgstr "Chyba přesunu"
+
+#: cinder/exception.py:906
+#, python-format
+msgid "Malformed message body: %(reason)s"
+msgstr "Poškozené tělo zprávy: %(reason)s"
+
+#: cinder/exception.py:910
+#, python-format
+msgid "Could not find config at %(path)s"
+msgstr "Nelze najít nastavení v %(path)s"
+
+#: cinder/exception.py:914
+#, python-format
+msgid "Could not load paste app '%(name)s' from %(path)s"
+msgstr "Nelze načíst aplikaci paste '%(name)s' z %(path)s"
+
+#: cinder/exception.py:918
+msgid "When resizing, instances must change size!"
+msgstr "Při změně velikosti musí instance svou velikost skutečně změnit!"
+
+#: cinder/exception.py:922
+msgid "Image is larger than instance type allows"
+msgstr "Obraz je větší, než typ instance povoluje"
+
+#: cinder/exception.py:926
+msgid "1 or more Zones could not complete the request"
+msgstr "1 nebo více Zón nemohlo požadavek dokončit"
+
+#: cinder/exception.py:930
+msgid "Instance type's memory is too small for requested image."
+msgstr "Paměť typu instance je pro požadovaný obraz příliš malá."
+
+#: cinder/exception.py:934
+msgid "Instance type's disk is too small for requested image."
+msgstr "Disk typu instance je pro požadovaný obraz příliš malý."
+
+#: cinder/exception.py:938
+#, python-format
+msgid "Insufficient free memory on compute node to start %(uuid)s."
+msgstr "Pro spuštění %(uuid)s je na výpočetním uzlu nedostatek volné paměti."
+
+#: cinder/exception.py:942
+msgid "Could not fetch bandwidth/cpu/disk metrics for this host."
+msgstr "Nelze získat metriky šířky pásma/procesoru/disku pro tohoto hostitele."
+
+#: cinder/exception.py:946
+#, python-format
+msgid "No valid host was found. %(reason)s"
+msgstr "Nebyl nalezen žádný platný hostitel. %(reason)s"
+
+#: cinder/exception.py:950
+#, python-format
+msgid "Host %(host)s is not up or doesn't exist."
+msgstr "Hostitel %(host)s není dostupný nebo neexistuje."
+
+#: cinder/exception.py:954
+msgid "Quota exceeded"
+msgstr "Kvóta překročena"
+
+#: cinder/exception.py:958
+#, python-format
+msgid ""
+"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
+"%(reason)s."
+msgstr ""
+"Agregát %(aggregate_id)s: činnost '%(action)s' způsobila chybu: "
+"%(reason)s."
+
+#: cinder/exception.py:963
+#, python-format
+msgid "Aggregate %(aggregate_id)s could not be found."
+msgstr "Agregát %(aggregate_id)s nemohl být nalezen."
+
+#: cinder/exception.py:967
+#, python-format
+msgid "Aggregate %(aggregate_name)s already exists."
+msgstr "Agregát %(aggregate_name)s již existuje."
+
+#: cinder/exception.py:971
+#, python-format
+msgid "Aggregate %(aggregate_id)s has no host %(host)s."
+msgstr "Agregát %(aggregate_id)s nemá hostitele %(host)s."
+
+#: cinder/exception.py:975
+#, python-format
+msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
+msgstr "Agregát %(aggregate_id)s nemá žádná metadata s klíčem %(metadata_key)s."
+
+#: cinder/exception.py:980
+#, python-format
+msgid "Host %(host)s already member of another aggregate."
+msgstr "Hostitel %(host)s již je členem jiného agregátu."
+
+#: cinder/exception.py:984
+#, python-format
+msgid "Aggregate %(aggregate_id)s already has host %(host)s."
+msgstr "Agregát %(aggregate_id)s již má hostitele %(host)s."
+
+#: cinder/exception.py:988
+#, python-format
+msgid "Detected more than one volume with name %(vol_name)s"
+msgstr "Zjištěn více než jeden svazek s názvem %(vol_name)s"
+
+#: cinder/exception.py:992
+#, python-format
+msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s"
+msgstr ""
+"Nelze vytvořit volume_type s názvem %(name)s a specifikacemi "
+"%(extra_specs)s"
+
+#: cinder/exception.py:997
+msgid "Unable to create instance type"
+msgstr "Nelze vytvořit typ instance"
+
+#: cinder/exception.py:1001
+msgid "Bad response from SolidFire API"
+msgstr "Špatná odpověď od SolidFire API"
+
+#: cinder/exception.py:1005
+#, python-format
+msgid "Error in SolidFire API response: status=%(status)s"
+msgstr "Chyba v odpovědi SolidFire API: stav=%(status)s"
+
+#: cinder/exception.py:1009
+#, python-format
+msgid "Error in SolidFire API response: data=%(data)s"
+msgstr "Chyba v odpovědi SolidFire API: data=%(data)s"
+
+#: cinder/exception.py:1013
+#, python-format
+msgid "Detected existing vlan with id %(vlan)d"
+msgstr "Zjištěna existující vlan s id %(vlan)d"
+
+#: cinder/exception.py:1017
+#, python-format
+msgid "Instance %(instance_id)s could not be found."
+msgstr "Instance %(instance_id)s nemohla být nalezena."
+
+#: cinder/exception.py:1021
+#, python-format
+msgid "Invalid id: %(val)s (expecting \"i-...\")."
+msgstr ""
+
+#: cinder/exception.py:1025
+#, python-format
+msgid "Could not fetch image %(image)s"
+msgstr "Nelze získat obraz %(image)s"
+
+#: cinder/log.py:315
+#, python-format
+msgid "syslog facility must be one of: %s"
+msgstr "zařízení záznamu systému musí být jedno z: %s"
+
+#: cinder/manager.py:146
+#, python-format
+msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run"
+msgstr ""
+"Přeskakování %(full_task_name)s, zbývá %(ticks_to_skip)s tiků do dalšího "
+"spuštění"
+
+#: cinder/manager.py:152
+#, python-format
+msgid "Running periodic task %(full_task_name)s"
+msgstr "Spouštění pravidelné úlohy %(full_task_name)s"
+
+#: cinder/manager.py:159
+#, python-format
+msgid "Error during %(full_task_name)s: %(e)s"
+msgstr "Chyba při %(full_task_name)s: %(e)s"
+
+#: cinder/manager.py:203
+msgid "Notifying Schedulers of capabilities ..."
+msgstr "Oznamování schopností plánovačům ..."
+
+#: cinder/policy.py:30
+msgid "JSON file representing policy"
+msgstr "Soubor JSON představující zásady"
+
+#: cinder/policy.py:33
+msgid "Rule checked when requested rule is not found"
+msgstr "Kontrolované pravidlo, když požadované není nalezeno"
+
+#: cinder/service.py:137
+msgid "SIGTERM received"
+msgstr ""
+
+#: cinder/service.py:177
+#, python-format
+msgid "Starting %(topic)s node (version %(vcs_string)s)"
+msgstr "Spouštění uzlu %(topic)s (verze %(vcs_string)s)"
+
+#: cinder/service.py:195
+#, python-format
+msgid "Creating Consumer connection for Service %s"
+msgstr "Vytváření připojení odběratele pro službu %s"
+
+#: cinder/service.py:282
+msgid "Service killed that has no database entry"
+msgstr "Ukončena služba bez záznamu v databázi"
+
+#: cinder/service.py:319
+msgid "The service database object disappeared, Recreating it."
+msgstr "Objekt databáze služby zmizel, je znovu vytvářen."
+
+#: cinder/service.py:334
+msgid "Recovered model server connection!"
+msgstr "Obnoveno připojení modelového serveru!"
+
+#: cinder/service.py:340
+msgid "model server went away"
+msgstr "modelový server je nedostupný"
+
+#: cinder/service.py:433
+msgid "Full set of FLAGS:"
+msgstr "Úplná sada PŘÍZNAKŮ:"
+
+#: cinder/service.py:440
+#, python-format
+msgid "%(flag)s : FLAG SET "
+msgstr "%(flag)s : PŘÍZNAK NASTAVEN "
+
+#: cinder/utils.py:79
+#, python-format
+msgid "Inner Exception: %s"
+msgstr "Vnitřní výjimka: %s"
+
+#: cinder/utils.py:165
+#, python-format
+msgid "Fetching %s"
+msgstr "Získávání %s"
+
+#: cinder/utils.py:210
+#, python-format
+msgid "Got unknown keyword args to utils.execute: %r"
+msgstr "Získány neznámé argumenty klíčového slova pro utils.execute: %r"
+
+#: cinder/utils.py:220
+#, python-format
+msgid "Running cmd (subprocess): %s"
+msgstr "Spouštění příkazu (podproces): %s"
+
+#: cinder/utils.py:236 cinder/utils.py:315
+#, python-format
+msgid "Result was %s"
+msgstr "Výsledek byl %s"
+
+#: cinder/utils.py:249
+#, python-format
+msgid "%r failed. Retrying."
+msgstr "%r selhalo. Opakování."
+
+#: cinder/utils.py:291
+#, python-format
+msgid "Running cmd (SSH): %s"
+msgstr "Spouštění příkazu (SSH): %s"
+
+#: cinder/utils.py:293
+msgid "Environment not supported over SSH"
+msgstr "Prostředí není podporováno přes SSH"
+
+#: cinder/utils.py:297
+msgid "process_input not supported over SSH"
+msgstr "process_input není podporován přes SSH"
+
+#: cinder/utils.py:352
+#, python-format
+msgid "debug in callback: %s"
+msgstr "ladění ve zpětném volání: %s"
+
+#: cinder/utils.py:534
+#, python-format
+msgid "Link Local address is not found.:%s"
+msgstr "Adresa místního spojení nenalezena.: %s"
+
+#: cinder/utils.py:537
+#, python-format
+msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
+msgstr "Nelze získat IP místního spojení %(interface)s :%(ex)s"
+
+#: cinder/utils.py:648
+#, python-format
+msgid "Invalid backend: %s"
+msgstr "Neplatná podpůrná vrstva: %s"
+
+#: cinder/utils.py:659
+#, python-format
+msgid "backend %s"
+msgstr "podpůrná vrstva %s"
+
+#: cinder/utils.py:709
+msgid "in looping call"
+msgstr "v opakujícím se volání"
+
+#: cinder/utils.py:927
+#, python-format
+msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "Pokus o získání semaforu \"%(lock)s\" pro metodu \"%(method)s\"..."
+
+#: cinder/utils.py:931
+#, python-format
+msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "Získán semafor \"%(lock)s\" pro metodu \"%(method)s\"..."
+
+#: cinder/utils.py:935
+#, python-format
+msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "Pokus o získání zámku souboru \"%(lock)s\" pro metodu \"%(method)s\"..."
+
+#: cinder/utils.py:942
+#, python-format
+msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "Získán zámek souboru \"%(lock)s\" pro metodu \"%(method)s\"..."
+
+#: cinder/utils.py:1001
+#, python-format
+msgid "Found sentinel %(filename)s for pid %(pid)s"
+msgstr ""
+
+#: cinder/utils.py:1008
+#, python-format
+msgid "Cleaned sentinel %(filename)s for pid %(pid)s"
+msgstr ""
+
+#: cinder/utils.py:1023
+#, python-format
+msgid "Found lockfile %(file)s with link count %(count)d"
+msgstr ""
+
+#: cinder/utils.py:1028
+#, python-format
+msgid "Cleaned lockfile %(file)s with link count %(count)d"
+msgstr ""
+
+#: cinder/utils.py:1138
+#, python-format
+msgid "Expected object of type: %s"
+msgstr "Očekáván objekt typu: %s"
+
+#: cinder/utils.py:1169
+#, python-format
+msgid "Invalid server_string: %s"
+msgstr "Neplatný server_string: %s"
+
+#: cinder/utils.py:1298
+#, python-format
+msgid "timefunc: '%(name)s' took %(total_time).2f secs"
+msgstr "timefunc: '%(name)s' trvalo %(total_time).2f sek"
+
+#: cinder/utils.py:1330
+msgid "Original exception being dropped"
+msgstr "Původní výjimka je zahozena"
+
+#: cinder/utils.py:1461
+#, python-format
+msgid "Class %(fullname)s is deprecated: %(msg)s"
+msgstr "Třída %(fullname)s je zastaralá: %(msg)s"
+
+#: cinder/utils.py:1463
+#, python-format
+msgid "Class %(fullname)s is deprecated"
+msgstr "Třída %(fullname)s je zastaralá"
+
+#: cinder/utils.py:1495
+#, python-format
+msgid "Function %(name)s in %(location)s is deprecated: %(msg)s"
+msgstr "Funkce %(name)s v %(location)s je zastaralá: %(msg)s"
+
+#: cinder/utils.py:1497
+#, python-format
+msgid "Function %(name)s in %(location)s is deprecated"
+msgstr "Funkce %(name)s v %(location)s je zastaralá"
+
+#: cinder/utils.py:1681
+#, python-format
+msgid "Could not remove tmpdir: %s"
+msgstr ""
+
+#: cinder/wsgi.py:97
+#, python-format
+msgid "Started %(name)s on %(host)s:%(port)s"
+msgstr "%(name)s spuštěno na %(host)s:%(port)s"
+
+#: cinder/wsgi.py:108
+msgid "Stopping WSGI server."
+msgstr "Zastavování serveru WSGI."
+
+#: cinder/wsgi.py:111
+msgid "Stopping raw TCP server."
+msgstr "Zastavování prostého serveru TCP."
+
+#: cinder/wsgi.py:117
+#, python-format
+msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s"
+msgstr "Spuštění serveru TCP %(arg0)s na %(host)s:%(port)s"
+
+#: cinder/wsgi.py:133
+msgid "WSGI server has stopped."
+msgstr "Server WSGI byl zastaven."
+
+#: cinder/wsgi.py:211
+msgid "You must implement __call__"
+msgstr "Musíte zavést __call__"
+
+#: cinder/api/direct.py:218
+msgid "not available"
+msgstr "nedostupné"
+
+#: cinder/api/direct.py:299
+#, python-format
+msgid "Returned non-serializeable type: %s"
+msgstr "Navrácen neserializovatelný typ: %s"
+
+#: cinder/api/sizelimit.py:51
+msgid "Request is too large."
+msgstr ""
+
+#: cinder/api/validator.py:142
+#, python-format
+msgid "%(key)s with value %(value)s failed validator %(validator)s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:73
+#, python-format
+msgid "%(code)s: %(message)s"
+msgstr "%(code)s: %(message)s"
+
+#: cinder/api/ec2/__init__.py:95
+#, python-format
+msgid "FaultWrapper: %s"
+msgstr "Obalovač chyb: %s"
+
+#: cinder/api/ec2/__init__.py:170
+msgid "Too many failed authentications."
+msgstr "Příliš mnoho ověření selhalo."
+
+#: cinder/api/ec2/__init__.py:180
+#, python-format
+msgid ""
+"Access key %(access_key)s has had %(failures)d failed authentications and"
+" will be locked out for %(lock_mins)d minutes."
+msgstr ""
+"Přístupový klíč %(access_key)s %(failures)d krát selhal při ověření a "
+"bude zablokován na %(lock_mins)d minut."
+
+#: cinder/api/ec2/__init__.py:267
+msgid "Signature not provided"
+msgstr "Podpis není zadán"
+
+#: cinder/api/ec2/__init__.py:271
+msgid "Access key not provided"
+msgstr "Přístupový klíč není zadán"
+
+#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319
+msgid "Failure communicating with keystone"
+msgstr "Chyba při komunikaci s keystone"
+
+#: cinder/api/ec2/__init__.py:388
+#, python-format
+msgid "Authentication Failure: %s"
+msgstr "Selhání ověření: %s"
+
+#: cinder/api/ec2/__init__.py:404
+#, python-format
+msgid "Authenticated Request For %(uname)s:%(pname)s)"
+msgstr "Ověřený požadavek pro %(uname)s:%(pname)s)"
+
+#: cinder/api/ec2/__init__.py:435
+#, python-format
+msgid "action: %s"
+msgstr "činnost: %s"
+
+#: cinder/api/ec2/__init__.py:437
+#, python-format
+msgid "arg: %(key)s\t\tval: %(value)s"
+msgstr "arg: %(key)s\t\thod: %(value)s"
+
+#: cinder/api/ec2/__init__.py:512
+#, python-format
+msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
+msgstr "Neoprávněný požadavek pro ovladač=%(controller)s a činnost=%(action)s"
+
+#: cinder/api/ec2/__init__.py:584
+#, python-format
+msgid "InstanceNotFound raised: %s"
+msgstr "Vyvoláno InstanceNenalezena: %s"
+
+#: cinder/api/ec2/__init__.py:590
+#, python-format
+msgid "VolumeNotFound raised: %s"
+msgstr "Vyvoláno SvazekNenalezen: %s"
+
+#: cinder/api/ec2/__init__.py:596
+#, python-format
+msgid "SnapshotNotFound raised: %s"
+msgstr "Vyvoláno SnímekNenalezen: %s"
+
+#: cinder/api/ec2/__init__.py:602
+#, python-format
+msgid "NotFound raised: %s"
+msgstr "Vyvoláno Nenalezeno: %s"
+
+#: cinder/api/ec2/__init__.py:605
+#, python-format
+msgid "EC2APIError raised: %s"
+msgstr "Vyvoláno ChybaApiEC2: %s"
+
+#: cinder/api/ec2/__init__.py:613
+#, python-format
+msgid "KeyPairExists raised: %s"
+msgstr "Vyvoláno DvojiceKlíčůExistuje: %s"
+
+#: cinder/api/ec2/__init__.py:617
+#, python-format
+msgid "InvalidParameterValue raised: %s"
+msgstr "Vyvoláno NeplatnáHodnotaParametru: %s"
+
+#: cinder/api/ec2/__init__.py:621
+#, python-format
+msgid "InvalidPortRange raised: %s"
+msgstr "Vyvoláno NeplatnýRozsahPortů: %s"
+
+#: cinder/api/ec2/__init__.py:625
+#, python-format
+msgid "NotAuthorized raised: %s"
+msgstr "Vyvoláno NeníOprávněno: %s"
+
+#: cinder/api/ec2/__init__.py:629
+#, python-format
+msgid "InvalidRequest raised: %s"
+msgstr "Vyvoláno NeplatnýPožadavek: %s"
+
+#: cinder/api/ec2/__init__.py:633
+#, python-format
+msgid "QuotaError raised: %s"
+msgstr "Vyvolána ChybaKvóty: %s"
+
+#: cinder/api/ec2/__init__.py:637
+#, python-format
+msgid "Invalid id: bogus (expecting \"i-...\"): %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:646
+#, python-format
+msgid "Unexpected error raised: %s"
+msgstr "Vyvolána neočekávaná chyba: %s"
+
+#: cinder/api/ec2/__init__.py:647
+#, python-format
+msgid "Environment: %s"
+msgstr "Prostředí: %s"
+
+#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248
+msgid "An unknown error has occurred. Please try your request again."
+msgstr "Vyskytla se neznámá chyba. Prosím zopakujte Váš požadavek."
+
+#: cinder/api/ec2/apirequest.py:64
+#, python-format
+msgid "Unsupported API request: controller = %(controller)s, action = %(action)s"
+msgstr ""
+"Nepodporovaný požadavek API: ovladač = %(controller)s, činnost = "
+"%(action)s"
+
+#: cinder/api/ec2/cloud.py:336
+#, python-format
+msgid "Create snapshot of volume %s"
+msgstr "Vytvořit snímek svazku %s"
+
+#: cinder/api/ec2/cloud.py:372
+#, python-format
+msgid ""
+"Value (%s) for KeyName is invalid. Content limited to Alphanumeric "
+"character, spaces, dashes, and underscore."
+msgstr ""
+"Hodnota (%s) parametru KeyName je neplatná. Obsah je omezen na "
+"alfanumerické znaky, mezery, pomlčky a podtržítka."
+
+#: cinder/api/ec2/cloud.py:378
+#, python-format
+msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255."
+msgstr ""
+"Hodnota (%s) parametru Keyname je neplatná. Délka překračuje maximum "
+"255 znaků."
+
+#: cinder/api/ec2/cloud.py:382
+#, python-format
+msgid "Create key pair %s"
+msgstr "Vytvořit dvojici klíčů %s"
+
+#: cinder/api/ec2/cloud.py:391
+#, python-format
+msgid "Import key %s"
+msgstr "Importovat klíč %s"
+
+#: cinder/api/ec2/cloud.py:409
+#, python-format
+msgid "Delete key pair %s"
+msgstr "Smazat dvojici klíčů %s"
+
+#: cinder/api/ec2/cloud.py:551
+msgid "Invalid CIDR"
+msgstr "Neplatný CIDR"
+
+#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693
+#: cinder/api/ec2/cloud.py:800
+msgid "Not enough parameters, need group_name or group_id"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:654
+#: cinder/api/openstack/compute/contrib/security_groups.py:517
+#, python-format
+msgid "Revoke security group ingress %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719
+#, python-format
+msgid "%s Not enough parameters to build a valid rule"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744
+msgid "No rule for the specified parameters."
+msgstr "Pro zadané parametry není žádné pravidlo."
+
+#: cinder/api/ec2/cloud.py:708
+#: cinder/api/openstack/compute/contrib/security_groups.py:354
+#, python-format
+msgid "Authorize security group ingress %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:725
+#, python-format
+msgid "%s - This rule already exists in group"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:769
+#, python-format
+msgid ""
+"Value (%s) for parameter GroupName is invalid. Content limited to "
+"Alphanumeric characters, spaces, dashes, and underscores."
+msgstr ""
+"Hodnota (%s) parametru GroupName je neplatná. Obsah je omezen na "
+"alfanumerické znaky, mezery, pomlčky a podtržítka."
+
+#: cinder/api/ec2/cloud.py:776
+#, python-format
+msgid ""
+"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of "
+"255."
+msgstr ""
+"Hodnota (%s) parametru GroupName je neplatná. Délka překračuje maximum "
+"255 znaků."
+
+#: cinder/api/ec2/cloud.py:780
+#: cinder/api/openstack/compute/contrib/security_groups.py:292
+#, python-format
+msgid "Create Security Group %s"
+msgstr "Vytvořit bezpečnostní skupinu %s"
+
+#: cinder/api/ec2/cloud.py:783
+#, python-format
+msgid "group %s already exists"
+msgstr "skupina %s již existuje"
+
+#: cinder/api/ec2/cloud.py:815
+#: cinder/api/openstack/compute/contrib/security_groups.py:245
+#, python-format
+msgid "Delete security group %s"
+msgstr "Smazat bezpečnostní skupinu %s"
+
+#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630
+#, python-format
+msgid "Get console output for instance %s"
+msgstr "Získat výstup konzole pro instanci %s"
+
+#: cinder/api/ec2/cloud.py:894
+#, python-format
+msgid "Create volume from snapshot %s"
+msgstr "Vytvořit svazek ze snímku %s"
+
+#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186
+#: cinder/api/openstack/volume/volumes.py:222
+#, python-format
+msgid "Create volume of %s GB"
+msgstr "Vytvořit svazek o %s GB"
+
+#: cinder/api/ec2/cloud.py:921
+msgid "Delete Failed"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:931
+#, python-format
+msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
+msgstr "Připojit svazek %(volume_id)s k instanci %(instance_id)s na %(device)s"
+
+#: cinder/api/ec2/cloud.py:939
+msgid "Attach Failed."
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366
+#, python-format
+msgid "Detach volume %s"
+msgstr "Odpojit svazek %s"
+
+#: cinder/api/ec2/cloud.py:959
+msgid "Detach Volume Failed."
+msgstr "Odpojení svazku selhalo."
+
+#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041
+#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533
+#, python-format
+msgid "attribute not supported: %s"
+msgstr "vlastnost není podporována: %s"
+
+#: cinder/api/ec2/cloud.py:1107
+#, python-format
+msgid "vol = %s\n"
+msgstr "svz = %s\n"
+
+#: cinder/api/ec2/cloud.py:1255
+msgid "Allocate address"
+msgstr "Přidělit adresu"
+
+#: cinder/api/ec2/cloud.py:1267
+#, python-format
+msgid "Release address %s"
+msgstr "Uvolnit adresu %s"
+
+#: cinder/api/ec2/cloud.py:1272
+#, python-format
+msgid "Associate address %(public_ip)s to instance %(instance_id)s"
+msgstr "Přidružit adresu %(public_ip)s k instanci %(instance_id)s"
+
+#: cinder/api/ec2/cloud.py:1282
+#, python-format
+msgid "Disassociate address %s"
+msgstr "Oddělit adresu %s"
+
+#: cinder/api/ec2/cloud.py:1308
+msgid "Image must be available"
+msgstr "Obraz musí být dostupný"
+
+#: cinder/api/ec2/cloud.py:1329
+msgid "Going to start terminating instances"
+msgstr "Bude spuštěno ukončování instancí"
+
+#: cinder/api/ec2/cloud.py:1343
+#, python-format
+msgid "Reboot instance %r"
+msgstr "Restartovat instanci %r"
+
+#: cinder/api/ec2/cloud.py:1354
+msgid "Going to stop instances"
+msgstr "Instance budou zastaveny"
+
+#: cinder/api/ec2/cloud.py:1365
+msgid "Going to start instances"
+msgstr "Instance budou spuštěny"
+
+#: cinder/api/ec2/cloud.py:1455
+#, python-format
+msgid "De-registering image %s"
+msgstr "Zrušení registrace obrazu %s"
+
+#: cinder/api/ec2/cloud.py:1471
+msgid "imageLocation is required"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1490
+#, python-format
+msgid "Registered image %(image_location)s with id %(image_id)s"
+msgstr "Obraz %(image_location)s registrován s id %(image_id)s"
+
+#: cinder/api/ec2/cloud.py:1536
+msgid "user or group not specified"
+msgstr "uživatel nebo skupina nebyly zadány"
+
+#: cinder/api/ec2/cloud.py:1538
+msgid "only group \"all\" is supported"
+msgstr "podporována je pouze skupina \"all\""
+
+#: cinder/api/ec2/cloud.py:1540
+msgid "operation_type must be add or remove"
+msgstr "operation_type musí být add nebo remove"
+
+#: cinder/api/ec2/cloud.py:1542
+#, python-format
+msgid "Updating image %s publicity"
+msgstr "Aktualizace publicity obrazu %s"
+
+#: cinder/api/ec2/cloud.py:1555
+#, python-format
+msgid "Not allowed to modify attributes for image %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1603
+#, python-format
+msgid "Couldn't stop instance with in %d sec"
+msgstr "Nelze zastavit instanci za %d sek"
+
+#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr "Nelze získat metadata pro ip: %s"
+
+#: cinder/api/openstack/__init__.py:43
+#, python-format
+msgid "Caught error: %s"
+msgstr "Zachycena chyba: %s"
+
+#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr "%(url)s vrácena s HTTP %(status)d"
+
+#: cinder/api/openstack/__init__.py:94
+msgid "Must specify an ExtensionManager class"
+msgstr "Musí být určena třída ExtensionManager"
+
+#: cinder/api/openstack/__init__.py:105
+#, python-format
+msgid "Extended resource: %s"
+msgstr "Rozšířený zdroj: %s"
+
+#: cinder/api/openstack/__init__.py:130
+#, python-format
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
+msgstr "Rozšíření %(ext_name)s: nelze rozšířit %(collection)s: Žádný takový zdroj"
+
+#: cinder/api/openstack/__init__.py:135
+#, python-format
+msgid "Extension %(ext_name)s extending resource: %(collection)s"
+msgstr "Rozšíření %(ext_name)s: rozšiřování zdroje %(collection)s"
+
+#: cinder/api/openstack/auth.py:90
+#, python-format
+msgid "%(user_id)s could not be found with token '%(token)s'"
+msgstr "%(user_id)s nelze nalézt se známkou '%(token)s'"
+
+#: cinder/api/openstack/auth.py:134
+#, python-format
+msgid "%(user_id)s must be an admin or a member of %(project_id)s"
+msgstr "%(user_id)s musí být správcem nebo členem %(project_id)s"
+
+#: cinder/api/openstack/auth.py:152
+msgid "Authentication requests must be made against a version root (e.g. /v2)."
+msgstr "Požadavky o ověření musí směřovat na kořen verze (např. /v2)."
+
+#: cinder/api/openstack/auth.py:167
+#, python-format
+msgid "Could not find %s in request."
+msgstr "Nelze najít %s v požadavku."
+
+#: cinder/api/openstack/auth.py:191
+#, python-format
+msgid "Successfully authenticated '%s'"
+msgstr "'%s' úspěšně ověřeno"
+
+#: cinder/api/openstack/auth.py:241
+msgid "User not found with provided API key."
+msgstr "Uživatel nenalezen pomocí zadaného klíče API."
+ +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "Zadaný klíč API je platný, ale ne pro uživatele '%(username)s'" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "parametr limit musí být celé číslo" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "parametr limit musí být kladný" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "parametr offset musí být celé číslo" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "parametr offset musí být kladný" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "značka [%s] nenalezena" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s neobsahuje verzi" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "Metadata obrazu překračují limit" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "Nelze '%(action)s' zatímco instance je %(attr)s %(state)s" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "Instance je v neplatném stavu pro '%(action)s'" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "Odmítnutí žádosti o snímek, snímky jsou nyní zakázány" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." +msgstr "Snímky instance nejsou v současnosti povoleny." 
+
+#: cinder/api/openstack/extensions.py:188
+#, python-format
+msgid "Loaded extension: %s"
+msgstr "Načteno rozšíření: %s"
+
+#: cinder/api/openstack/extensions.py:225
+#, python-format
+msgid "Ext name: %s"
+msgstr "Název roz: %s"
+
+#: cinder/api/openstack/extensions.py:226
+#, python-format
+msgid "Ext alias: %s"
+msgstr "Přezdívka roz: %s"
+
+#: cinder/api/openstack/extensions.py:227
+#, python-format
+msgid "Ext description: %s"
+msgstr "Popis roz: %s"
+
+#: cinder/api/openstack/extensions.py:229
+#, python-format
+msgid "Ext namespace: %s"
+msgstr "Jmenný prostor roz: %s"
+
+#: cinder/api/openstack/extensions.py:230
+#, python-format
+msgid "Ext updated: %s"
+msgstr "Roz aktualizováno: %s"
+
+#: cinder/api/openstack/extensions.py:232
+#, python-format
+msgid "Exception loading extension: %s"
+msgstr "Výjimka při načítání rozšíření: %s"
+
+#: cinder/api/openstack/extensions.py:246
+#, python-format
+msgid "Loading extension %s"
+msgstr "Načítání rozšíření %s"
+
+#: cinder/api/openstack/extensions.py:252
+#, python-format
+msgid "Calling extension factory %s"
+msgstr "Volání továrny rozšíření %s"
+
+#: cinder/api/openstack/extensions.py:264
+#, python-format
+msgid "Failed to load extension %(ext_factory)s: %(exc)s"
+msgstr "Nelze načíst rozšíření %(ext_factory)s: %(exc)s"
+
+#: cinder/api/openstack/extensions.py:344
+#, python-format
+msgid "Failed to load extension %(classpath)s: %(exc)s"
+msgstr "Nelze načíst rozšíření %(classpath)s: %(exc)s"
+
+#: cinder/api/openstack/extensions.py:368
+#, python-format
+msgid "Failed to load extension %(ext_name)s: %(exc)s"
+msgstr "Nelze načíst rozšíření %(ext_name)s: %(exc)s"
+
+#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538
+msgid "cannot understand JSON"
+msgstr "Nelze porozumět JSON"
+
+#: cinder/api/openstack/wsgi.py:159
+#: cinder/api/openstack/compute/contrib/hosts.py:86
+msgid "cannot understand XML"
+msgstr "Nelze porozumět XML"
+
+#: cinder/api/openstack/wsgi.py:543
+msgid "too many body keys"
+msgstr "příliš mnoho klíčů těla"
+
+#: cinder/api/openstack/wsgi.py:582
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr "Výjimka při zpracování zdroje: %s"
+
+#: cinder/api/openstack/wsgi.py:586
+#, python-format
+msgid "Fault thrown: %s"
+msgstr "Vyvolána chyba: %s"
+
+#: cinder/api/openstack/wsgi.py:589
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr "Vyvolána výjimka HTTP: %s"
+
+#: cinder/api/openstack/wsgi.py:697
+msgid "Unrecognized Content-Type provided in request"
+msgstr "V požadavku zadán nerozpoznaný Content-Type"
+
+#: cinder/api/openstack/wsgi.py:701
+msgid "No Content-Type provided in request"
+msgstr "V požadavku nezadán Content-Type"
+
+#: cinder/api/openstack/wsgi.py:705
+msgid "Empty body provided in request"
+msgstr "V požadavku zadáno prázdné tělo"
+
+#: cinder/api/openstack/wsgi.py:816
+#, python-format
+msgid "There is no such action: %s"
+msgstr "Žádná taková činnost: %s"
+
+#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832
+#: cinder/api/openstack/compute/server_metadata.py:58
+#: cinder/api/openstack/compute/server_metadata.py:76
+#: cinder/api/openstack/compute/server_metadata.py:101
+#: cinder/api/openstack/compute/server_metadata.py:126
+#: cinder/api/openstack/compute/contrib/admin_actions.py:211
+#: cinder/api/openstack/compute/contrib/console_output.py:52
+msgid "Malformed request body"
+msgstr "Poškozené tělo požadavku"
+
+#: cinder/api/openstack/wsgi.py:829
+msgid "Unsupported Content-Type"
+msgstr "Nepodporovaný Content-Type"
+
+#: cinder/api/openstack/wsgi.py:841
+msgid "Malformed request url"
+msgstr "Poškozená url požadavku"
+
+#: cinder/api/openstack/wsgi.py:889
+#, python-format
+msgid "%(url)s returned a fault: %(e)s"
+msgstr "%(url)s vrátilo chybu: %(e)s"
+
+#: cinder/api/openstack/xmlutil.py:265
+msgid "element is not a child"
+msgstr "prvek není podřazený"
+
+#: cinder/api/openstack/xmlutil.py:414
+msgid "root element selecting a list"
+msgstr "kořenový prvek volí seznam"
+
+#: cinder/api/openstack/xmlutil.py:739
+#, python-format
+msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s"
+msgstr ""
+"Neshoda stromu šablony; přidávání podřízeného %(slavetag)s k nadřízenému "
+"%(mastertag)s"
+
+#: cinder/api/openstack/xmlutil.py:858
+msgid "subclasses must implement construct()!"
+msgstr "podtřídy musí zavádět construct()!"
+
+#: cinder/api/openstack/compute/extensions.py:29
+#: cinder/api/openstack/volume/extensions.py:29
+msgid "Initializing extension manager."
+msgstr "Zavádění správce rozšíření."
+
+#: cinder/api/openstack/compute/image_metadata.py:40
+#: cinder/api/openstack/compute/images.py:146
+#: cinder/api/openstack/compute/images.py:161
+msgid "Image not found."
+msgstr "Obraz nenalezen."
+
+#: cinder/api/openstack/compute/image_metadata.py:79
+msgid "Incorrect request body format"
+msgstr "Nesprávný formát těla požadavku"
+
+#: cinder/api/openstack/compute/image_metadata.py:83
+#: cinder/api/openstack/compute/server_metadata.py:80
+#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79
+#: cinder/api/openstack/compute/contrib/volumetypes.py:188
+msgid "Request body and URI mismatch"
+msgstr "Neshoda mezi tělem požadavku a URI"
+
+#: cinder/api/openstack/compute/image_metadata.py:86
+#: cinder/api/openstack/compute/server_metadata.py:84
+#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82
+#: cinder/api/openstack/compute/contrib/volumetypes.py:191
+msgid "Request body contains too many items"
+msgstr "Tělo požadavku obsahuje příliš mnoho položek"
+
+#: cinder/api/openstack/compute/image_metadata.py:111
+msgid "Invalid metadata key"
+msgstr "Neplatný klíč metadat"
+
+#: cinder/api/openstack/compute/ips.py:74
+msgid "Instance does not exist"
+msgstr "Instance neexistuje"
+
+#: cinder/api/openstack/compute/ips.py:97
+msgid "Instance is not a member of specified network"
+msgstr "Instance není členem zadané sítě"
+
+#: cinder/api/openstack/compute/limits.py:140
+#, python-format
+msgid ""
+"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
+"%(unit_string)s."
+msgstr ""
+"Pouze %(value)s požadavky %(verb)s mohou být provedeny pro %(uri)s "
+"každých %(unit_string)s."
+
+#: cinder/api/openstack/compute/limits.py:266
+msgid "This request was rate-limited."
+msgstr "Tento požadavek byl omezen kvůli četnosti."
+
+#: cinder/api/openstack/compute/server_metadata.py:38
+#: cinder/api/openstack/compute/server_metadata.py:122
+#: cinder/api/openstack/compute/server_metadata.py:159
+msgid "Server does not exist"
+msgstr "Server neexistuje"
+
+#: cinder/api/openstack/compute/server_metadata.py:141
+#: cinder/api/openstack/compute/server_metadata.py:152
+msgid "Metadata item was not found"
+msgstr "Položka metadat nenalezena"
+
+#: cinder/api/openstack/compute/servers.py:425
+#, python-format
+msgid "Invalid server status: %(status)s"
+msgstr "Neplatný stav serveru: %(status)s"
+
+#: cinder/api/openstack/compute/servers.py:433
+msgid "Invalid changes-since value"
+msgstr "Neplatná hodnota changes-since"
+
+#: cinder/api/openstack/compute/servers.py:483
+msgid "Personality file limit exceeded"
+msgstr "Překročen limit osobnostního souboru"
+
+#: cinder/api/openstack/compute/servers.py:485
+msgid "Personality file path too long"
+msgstr "Cesta osobnostního souboru je příliš dlouhá"
+
+#: cinder/api/openstack/compute/servers.py:487
+msgid "Personality file content too long"
+msgstr "Obsah osobnostního souboru je příliš dlouhý"
+
+#: cinder/api/openstack/compute/servers.py:501
+msgid "Server name is not a string or unicode"
+msgstr "Název serveru není řetězec nebo unicode"
+
+#: cinder/api/openstack/compute/servers.py:505
+msgid "Server name is an empty string"
+msgstr "Název serveru je prázdný řetězec"
+
+#: cinder/api/openstack/compute/servers.py:509
+msgid "Server name must be less than 256 characters."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:527
+#, python-format
+msgid "Bad personality format: missing %s"
+msgstr "Špatný formát osobnosti: chybí %s"
+
+#: cinder/api/openstack/compute/servers.py:530
+msgid "Bad personality format"
+msgstr "Špatný formát osobnosti"
+
+#: cinder/api/openstack/compute/servers.py:535
+#, python-format
+msgid "Personality content for %s cannot be decoded"
+msgstr "Obsah osobnosti pro %s nemůže být dekódován"
+
+#: cinder/api/openstack/compute/servers.py:550
+#, python-format
+msgid "Bad networks format: network uuid is not in proper format (%s)"
+msgstr "Špatný formát sítí: uuid sítě není ve správném formátu (%s)"
+
+#: cinder/api/openstack/compute/servers.py:559
+#, python-format
+msgid "Invalid fixed IP address (%s)"
+msgstr "Neplatná pevná IP adresa (%s)"
+
+#: cinder/api/openstack/compute/servers.py:566
+#, python-format
+msgid "Duplicate networks (%s) are not allowed"
+msgstr "Duplicitní sítě (%s) nejsou povoleny"
+
+#: cinder/api/openstack/compute/servers.py:572
+#, python-format
+msgid "Bad network format: missing %s"
+msgstr "Špatný formát sítě: chybí %s"
+
+#: cinder/api/openstack/compute/servers.py:575
+msgid "Bad networks format"
+msgstr "Špatný formát sítí"
+
+#: cinder/api/openstack/compute/servers.py:587
+msgid "Userdata content cannot be decoded"
+msgstr "Obsah uživatelských dat nemůže být dekódován"
+
+#: cinder/api/openstack/compute/servers.py:594
+msgid "accessIPv4 is not proper IPv4 format"
+msgstr "accessIPv4 není ve správném formátu IPv4"
+
+#: cinder/api/openstack/compute/servers.py:601
+msgid "accessIPv6 is not proper IPv6 format"
+msgstr "accessIPv6 není ve správném formátu IPv6"
+
+#: cinder/api/openstack/compute/servers.py:633
+msgid "Server name is not defined"
+msgstr "Název serveru není určen"
+
+#: cinder/api/openstack/compute/servers.py:675
+#: cinder/api/openstack/compute/servers.py:740
+msgid "Invalid flavorRef provided."
+msgstr "Zadáno neplatné flavorRef."
+
+#: cinder/api/openstack/compute/servers.py:737
+msgid "Can not find requested image"
+msgstr "Nelze najít požadovaný obraz"
+
+#: cinder/api/openstack/compute/servers.py:743
+msgid "Invalid key_name provided."
+msgstr "Zadán neplatný key_name."
+
+#: cinder/api/openstack/compute/servers.py:829
+#: cinder/api/openstack/compute/servers.py:849
+msgid "Instance has not been resized."
+msgstr "Velikost instance nebyla změněna."
+
+#: cinder/api/openstack/compute/servers.py:835
+#, python-format
+msgid "Error in confirm-resize %s"
+msgstr "Chyba v confirm-resize %s"
+
+#: cinder/api/openstack/compute/servers.py:855
+#, python-format
+msgid "Error in revert-resize %s"
+msgstr "Chyba v revert-resize %s"
+
+#: cinder/api/openstack/compute/servers.py:868
+msgid "Argument 'type' for reboot is not HARD or SOFT"
+msgstr "Argument 'type' pro restart není HARD či SOFT"
+
+#: cinder/api/openstack/compute/servers.py:872
+msgid "Missing argument 'type' for reboot"
+msgstr "Chybí argument 'type' pro restart"
+
+#: cinder/api/openstack/compute/servers.py:885
+#, python-format
+msgid "Error in reboot %s"
+msgstr "Chyba v restartu %s"
+
+#: cinder/api/openstack/compute/servers.py:897
+msgid "Unable to locate requested flavor."
+msgstr "Nelze najít požadovanou konfiguraci."
+
+#: cinder/api/openstack/compute/servers.py:900
+msgid "Resize requires a change in size."
+msgstr "Resize vyžaduje změnu velikosti."
+
+#: cinder/api/openstack/compute/servers.py:924
+msgid "Malformed server entity"
+msgstr "Poškozený objekt serveru"
+
+#: cinder/api/openstack/compute/servers.py:931
+msgid "Missing imageRef attribute"
+msgstr "Chybí vlastnost imageRef"
+
+#: cinder/api/openstack/compute/servers.py:940
+msgid "Invalid imageRef provided."
+msgstr "Zadáno neplatné imageRef."
+
+#: cinder/api/openstack/compute/servers.py:949
+msgid "Missing flavorRef attribute"
+msgstr "Chybí vlastnost flavorRef"
+
+#: cinder/api/openstack/compute/servers.py:962
+msgid "No adminPass was specified"
+msgstr "Nebylo zadáno adminPass"
+
+#: cinder/api/openstack/compute/servers.py:966
+#: cinder/api/openstack/compute/servers.py:1144
+msgid "Invalid adminPass"
+msgstr "Neplatné adminPass"
+
+#: cinder/api/openstack/compute/servers.py:980
+msgid "Unable to parse metadata key/value pairs."
+msgstr "Nelze zpracovat dvojice klíč/hodnota metadat."
+
+#: cinder/api/openstack/compute/servers.py:993
+msgid "Resize request has invalid 'flavorRef' attribute."
+msgstr "Požadavek na změnu velikosti má neplatnou vlastnost 'flavorRef'."
+
+#: cinder/api/openstack/compute/servers.py:996
+msgid "Resize requests require 'flavorRef' attribute."
+msgstr "Požadavky na změnu velikosti vyžadují vlastnost 'flavorRef'."
+
+#: cinder/api/openstack/compute/servers.py:1014
+#: cinder/api/openstack/compute/contrib/aggregates.py:142
+#: cinder/api/openstack/compute/contrib/networks.py:65
+msgid "Invalid request body"
+msgstr "Neplatné tělo požadavku"
+
+#: cinder/api/openstack/compute/servers.py:1019
+msgid "Could not parse imageRef from request."
+msgstr "Nelze zpracovat imageRef z požadavku."
+
+#: cinder/api/openstack/compute/servers.py:1071
+msgid "Instance could not be found"
+msgstr "Instance nemohla být nalezena"
+
+#: cinder/api/openstack/compute/servers.py:1074
+msgid "Cannot find image for rebuild"
+msgstr "Nelze najít obraz k opětovnému sestavení"
+
+#: cinder/api/openstack/compute/servers.py:1103
+msgid "createImage entity requires name attribute"
+msgstr "Objekt createImage vyžaduje vlastnost name"
+
+#: cinder/api/openstack/compute/servers.py:1112
+#: cinder/api/openstack/compute/contrib/admin_actions.py:238
+msgid "Invalid metadata"
+msgstr "Neplatná metadata"
+
+#: cinder/api/openstack/compute/servers.py:1167
+#, python-format
+msgid "Removing options '%(unk_opt_str)s' from query"
+msgstr "Odstraňování voleb '%(unk_opt_str)s' z dotazu"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:60
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr "Compute.api::pause %s"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:77
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr "Compute.api::unpause %s"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:94
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr "compute.api::suspend %s"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:111
+#, python-format
+msgid "compute.api::resume %s"
+msgstr "compute.api::resume %s"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:127
+#, python-format
+msgid "Error in migrate %s"
+msgstr "Chyba v přesunu %s"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:141
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr "Compute.api::reset_network %s"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:154
+#: cinder/api/openstack/compute/contrib/admin_actions.py:170
+#: cinder/api/openstack/compute/contrib/admin_actions.py:186
+#: cinder/api/openstack/compute/contrib/multinic.py:41
+#: cinder/api/openstack/compute/contrib/rescue.py:44
+msgid "Server not found"
+msgstr "Server nenalezen"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:157
+#, python-format
+msgid "Compute.api::inject_network_info %s"
+msgstr "Compute.api::inject_network_info %s"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:173
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr "Compute.api::lock %s"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:189
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr "Compute.api::unlock %s"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:219
+#, python-format
+msgid "createBackup entity requires %s attribute"
+msgstr "Objekt createBackup vyžaduje vlastnost %s"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:223
+msgid "Malformed createBackup entity"
+msgstr "Poškozený objekt createBackup"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:229
+msgid "createBackup attribute 'rotation' must be an integer"
+msgstr "Vlastnost 'rotation' objektu createBackup musí být celé číslo"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:244
+#: cinder/api/openstack/compute/contrib/console_output.py:47
+#: cinder/api/openstack/compute/contrib/console_output.py:59
+#: cinder/api/openstack/compute/contrib/consoles.py:49
+#: cinder/api/openstack/compute/contrib/consoles.py:60
+#: cinder/api/openstack/compute/contrib/server_action_list.py:49
+#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47
+#: cinder/api/openstack/compute/contrib/server_start_stop.py:38
+msgid "Instance not found"
+msgstr "Instance nenalezena"
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:272
+msgid "host and block_migration must be specified."
+msgstr "host a block_migration musí být zadány."
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:284
+#, python-format
+msgid "Live migration of instance %(id)s to host %(host)s failed"
+msgstr "Migrace za chodu instance %(id)s na hostitele %(host)s selhala"
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:76
+#, python-format
+msgid ""
+"Cannot create aggregate with name %(name)s and availability zone "
+"%(avail_zone)s"
+msgstr ""
+"Nelze vytvořit agregát s názvem %(name)s a zónou dostupnosti "
+"%(avail_zone)s"
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:88
+#, python-format
+msgid "Cannot show aggregate: %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:114
+#, python-format
+msgid "Cannot update aggregate: %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:126
+#, python-format
+msgid "Cannot delete aggregate: %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:139
+#, python-format
+msgid "Aggregates does not have %s action"
+msgstr "Agregáty nemají činnost %s"
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:152
+#: cinder/api/openstack/compute/contrib/aggregates.py:158
+#, python-format
+msgid "Cannot add host %(host)s in aggregate %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:171
+#: cinder/api/openstack/compute/contrib/aggregates.py:175
+#, python-format
+msgid "Cannot remove host %(host)s in aggregate %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:195
+#, python-format
+msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/certificates.py:75
+msgid "Only root certificate can be retrieved."
+msgstr "Může být získán pouze kořenový certifikát."
+
+#: cinder/api/openstack/compute/contrib/cloudpipe.py:146
+msgid ""
+"Unable to claim IP for VPN instances, ensure it isn't running, and try "
+"again in a few minutes"
+msgstr ""
+"Nelze získat IP pro instance VPN, ujistěte se, že není spuštěna a zkuste "
+"to znovu za pár minut"
+
+#: cinder/api/openstack/compute/contrib/consoles.py:44
+msgid "Missing type specification"
+msgstr "Chybí určení typu"
+
+#: cinder/api/openstack/compute/contrib/consoles.py:56
+msgid "Invalid type specification"
+msgstr "Neplatné určení typu"
+
+#: cinder/api/openstack/compute/contrib/disk_config.py:44
+#, python-format
+msgid "%s must be either 'MANUAL' or 'AUTO'."
+msgstr "%s musí být buď 'MANUAL' nebo 'AUTO'."
+
+#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77
+#: cinder/api/openstack/compute/contrib/extended_status.py:61
+msgid "Server not found."
+msgstr "Server nenalezen."
+
+#: cinder/api/openstack/compute/contrib/flavorextradata.py:61
+#: cinder/api/openstack/compute/contrib/flavorextradata.py:91
+msgid "Flavor not found."
+msgstr "Konfigurace nenalezena."
+
+#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49
+#: cinder/api/openstack/compute/contrib/volumetypes.py:158
+msgid "No Request Body"
+msgstr "Žádné tělo požadavku"
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:159
+#, python-format
+msgid "No more floating ips in pool %s."
+msgstr "Žádné další plovoucí ip v zásobníku %s."
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:161
+msgid "No more floating ips available."
+msgstr "Žádné další plovoucí ip nejsou dostupné."
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:201
+#: cinder/api/openstack/compute/contrib/floating_ips.py:230
+#: cinder/api/openstack/compute/contrib/security_groups.py:571
+#: cinder/api/openstack/compute/contrib/security_groups.py:604
+msgid "Missing parameter dict"
+msgstr "Chybí parametr dict"
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:204
+#: cinder/api/openstack/compute/contrib/floating_ips.py:233
+msgid "Address not specified"
+msgstr "Adresa není určena"
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:213
+msgid "No fixed ips associated to instance"
+msgstr "K instanci nejsou přidruženy žádné pevné ip"
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:216
+msgid "Associate floating ip failed"
+msgstr "Přidružení plovoucí ip selhalo"
+
+#: cinder/api/openstack/compute/contrib/hosts.py:144
+#, python-format
+msgid "Invalid status: '%s'"
+msgstr "Neplatný stav: '%s'"
+
+#: cinder/api/openstack/compute/contrib/hosts.py:148
+#, python-format
+msgid "Invalid mode: '%s'"
+msgstr "Neplatný režim: '%s'"
+
+#: cinder/api/openstack/compute/contrib/hosts.py:152
+#, python-format
+msgid "Invalid update setting: '%s'"
+msgstr "Neplatné nastavení aktualizace: '%s'"
+
+#: cinder/api/openstack/compute/contrib/hosts.py:170
+#, python-format
+msgid "Putting host %(host)s in maintenance mode %(mode)s."
+msgstr "Přepínání hostitele %(host)s do režimu údržby %(mode)s."
+
+#: cinder/api/openstack/compute/contrib/hosts.py:181
+#, python-format
+msgid "Setting host %(host)s to %(state)s."
+msgstr "Nastavování hostitele %(host)s na %(state)s."
+
+#: cinder/api/openstack/compute/contrib/hosts.py:230
+msgid "Describe-resource is admin only functionality"
+msgstr "Describe-resource je funkce pouze pro správce"
+
+#: cinder/api/openstack/compute/contrib/hosts.py:238
+msgid "Host not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:70
+msgid "Keypair name contains unsafe characters"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:95
+msgid "Keypair name must be between 1 and 255 characters long"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:100
+#, python-format
+msgid "Key pair '%s' already exists."
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +#, fuzzy, python-format +msgid "Instance type for vpn instances" +msgstr "Neplatná instance typu %(instance_type)s." 
+
+#: cinder/cloudpipe/pipelib.py:49
+msgid "Template for cloudpipe instance boot script"
+msgstr ""
+
+#: cinder/cloudpipe/pipelib.py:52
+msgid "Network to push into openvpn config"
+msgstr ""
+
+#: cinder/cloudpipe/pipelib.py:55
+msgid "Netmask to push into openvpn config"
+msgstr ""
+
+#: cinder/cloudpipe/pipelib.py:107
+#, python-format
+msgid "Launching VPN for %s"
+msgstr ""
+
+#: cinder/compute/api.py:141
+msgid "No compute host specified"
+msgstr ""
+
+#: cinder/compute/api.py:144
+#, python-format
+msgid "Unable to find host for Instance %s"
+msgstr ""
+
+#: cinder/compute/api.py:192
+#, python-format
+msgid ""
+"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
+"properties"
+msgstr ""
+
+#: cinder/compute/api.py:203
+#, python-format
+msgid "Quota exceeded for %(pid)s, metadata property key or value too long"
+msgstr ""
+
+#: cinder/compute/api.py:257
+msgid "Cannot run any more instances of this type."
+msgstr ""
+
+#: cinder/compute/api.py:259
+#, python-format
+msgid "Can only run %s more instances of this type."
+msgstr ""
+
+#: cinder/compute/api.py:261
+#, python-format
+msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. "
+msgstr ""
+
+#: cinder/compute/api.py:310
+msgid "Creating a raw instance"
+msgstr ""
+
+#: cinder/compute/api.py:312
+#, python-format
+msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s"
+msgstr ""
+
+#: cinder/compute/api.py:383
+#, python-format
+msgid "Going to run %s instances..."
+msgstr ""
+
+#: cinder/compute/api.py:447
+#, python-format
+msgid "bdm %s"
+msgstr ""
+
+#: cinder/compute/api.py:474
+#, python-format
+msgid "block_device_mapping %s"
+msgstr ""
+
+#: cinder/compute/api.py:591
+#, python-format
+msgid "Sending create to scheduler for %(pid)s/%(uid)s's"
+msgstr ""
+
+#: cinder/compute/api.py:871
+msgid "Going to try to soft delete instance"
+msgstr "Proběhne pokus o jemné smazání instance"
+
+#: cinder/compute/api.py:891
+msgid "No host for instance, deleting immediately"
+msgstr ""
+
+#: cinder/compute/api.py:939
+msgid "Going to try to terminate instance"
+msgstr "Proběhne pokus o ukončení instance"
+
+#: cinder/compute/api.py:977
+msgid "Going to try to stop instance"
+msgstr "Proběhne pokus o zastavení instance"
+
+#: cinder/compute/api.py:996
+msgid "Going to try to start instance"
+msgstr "Proběhne pokus o spuštění instance"
+
+#: cinder/compute/api.py:1000
+#, python-format
+msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s"
+msgstr ""
+
+#: cinder/compute/api.py:1071 cinder/volume/api.py:173
+#: cinder/volume/volume_types.py:64
+#, python-format
+msgid "Searching by: %s"
+msgstr ""
+
+#: cinder/compute/api.py:1201
+#, python-format
+msgid "Image type not recognized %s"
+msgstr ""
+
+#: cinder/compute/api.py:1369
+msgid "flavor_id is None. Assuming migration."
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, fuzzy, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" +"Konzole pro instanci %(instance_id)s v zásobě %(pool_id)s nemohla být " +"nalezena." + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, fuzzy, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "Instance %(instance_id)s není v nouzovém režimu." 
+ +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." 
+msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, fuzzy, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "Agregát %(aggregate_id)s již má hostitele %(host)s." + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." +msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: cinder/network/linux_net.py:1142 +#, fuzzy, python-format +msgid "Starting bridge %s " +msgstr "Zrušení registrace obrazu %s" + +#: cinder/network/linux_net.py:1149 +#, fuzzy, python-format +msgid "Done starting bridge %s" +msgstr "Zrušení registrace obrazu %s" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, fuzzy, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "Certifikát %(certificate_id)s nenalezen." + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, fuzzy, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "K instanci nejsou přidruženy žádné pevné ip" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Volajícímu je vrácena výjimka: %s" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "rozbalený kontext: %s" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "obdrženo: %s" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "pro zprávu není metoda: %s" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "Pro zprávu není metoda: %s" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID je %s" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, fuzzy, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgstr "Nastavování hostitele %(host)s na %(state)s." 
+ +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, fuzzy, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "Kvóta překročena" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: 
cinder/virt/connection.py:85 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +msgid "Instance soft rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:696 +#, fuzzy +msgid "Failed to soft reboot instance." +msgstr "Nelze restartovat instanci" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +#, fuzzy +msgid "Guest does not have a console available" +msgstr "Uživatel nemá správcovská oprávnění" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "Nelze získat aktualizovaný stav" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s."
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "Nelze použít globální roli %(role_id)s" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, fuzzy, python-format +msgid "VBD %s already detached" +msgstr "skupina %s již existuje" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, fuzzy, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "Nenalezeno žádné cílové id ve svazku %(volume_id)s." 
+ +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "Nelze získat obraz glance" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of %(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' 
does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +#, fuzzy +msgid "Starting instance" +msgstr "Instance budou spuštěny" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +#, fuzzy +msgid "Failed to spawn, rolling back" +msgstr "Nelze pozastavit instanci" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +#, fuzzy, python-format +msgid "Starting snapshot for VM" +msgstr "Vytvořit snímek svazku %s" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "Nelze vytvořit typ instance" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, fuzzy, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "Instance %(instance_id)s nemohla být nastavena." + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Obraz musí být dostupný" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: 
cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, fuzzy, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "Obraz %(image_id)s je nepřijatelný: %(reason)s" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Znovu připojeno k frontě" + +#: cinder/volume/netapp.py:159 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "Vyvoláno Nenalezeno: %s" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/netapp.py:614 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/netapp.py:620 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/netapp.py:625 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Nelze získat metadata pro ip: %s" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." 
+msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +#, fuzzy +msgid "Bad response from server" +msgstr "Špatná odpověď od SolidFire API" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "odpověď %s" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." 
+msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "Přístupové parametry Cinder nebyly zadány." + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "Pole virtuálního úložiště %(id)d nebylo nalezeno." + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "Pole virtuálního úložiště %(name)s nebylo nalezeno." + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." 
+#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "" + +#~ msgid "message %s" +#~ msgstr "zpráva %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "" + +#~ msgid "Declaring queue %s" +#~ msgstr "" + +#~ msgid "Declaring exchange %s" +#~ msgstr "" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "" + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgstr "" + +#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgstr "" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "Volume status must be available" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/da/LC_MESSAGES/nova.po b/cinder/locale/da/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..5c5ba583c74 --- /dev/null +++ b/cinder/locale/da/LC_MESSAGES/nova.po @@ -0,0 +1,8203 @@ +# Danish translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2011-01-15 21:46+0000\n" +"Last-Translator: Soren Hansen \n" +"Language-Team: Danish \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Filnavn for privatnøgle" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "" + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." 
+msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +msgid "Failed to terminate instance" +msgstr "" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." 
+msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." 
+msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." 
+msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr ""
+
+#: cinder/utils.py:1001
+#, python-format
+msgid "Found sentinel %(filename)s for pid %(pid)s"
+msgstr ""
+
+#: cinder/utils.py:1008
+#, python-format
+msgid "Cleaned sentinel %(filename)s for pid %(pid)s"
+msgstr ""
+
+#: cinder/utils.py:1023
+#, python-format
+msgid "Found lockfile %(file)s with link count %(count)d"
+msgstr ""
+
+#: cinder/utils.py:1028
+#, python-format
+msgid "Cleaned lockfile %(file)s with link count %(count)d"
+msgstr ""
+
+#: cinder/utils.py:1138
+#, python-format
+msgid "Expected object of type: %s"
+msgstr ""
+
+#: cinder/utils.py:1169
+#, python-format
+msgid "Invalid server_string: %s"
+msgstr ""
+
+#: cinder/utils.py:1298
+#, python-format
+msgid "timefunc: '%(name)s' took %(total_time).2f secs"
+msgstr ""
+
+#: cinder/utils.py:1330
+msgid "Original exception being dropped"
+msgstr ""
+
+#: cinder/utils.py:1461
+#, python-format
+msgid "Class %(fullname)s is deprecated: %(msg)s"
+msgstr ""
+
+#: cinder/utils.py:1463
+#, python-format
+msgid "Class %(fullname)s is deprecated"
+msgstr ""
+
+#: cinder/utils.py:1495
+#, python-format
+msgid "Function %(name)s in %(location)s is deprecated: %(msg)s"
+msgstr ""
+
+#: cinder/utils.py:1497
+#, python-format
+msgid "Function %(name)s in %(location)s is deprecated"
+msgstr ""
+
+#: cinder/utils.py:1681
+#, python-format
+msgid "Could not remove tmpdir: %s"
+msgstr ""
+
+#: cinder/wsgi.py:97
+#, python-format
+msgid "Started %(name)s on %(host)s:%(port)s"
+msgstr ""
+
+#: cinder/wsgi.py:108
+msgid "Stopping WSGI server."
+msgstr ""
+
+#: cinder/wsgi.py:111
+msgid "Stopping raw TCP server."
+msgstr ""
+
+#: cinder/wsgi.py:117
+#, python-format
+msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s"
+msgstr ""
+
+#: cinder/wsgi.py:133
+msgid "WSGI server has stopped."
+msgstr ""
+
+#: cinder/wsgi.py:211
+msgid "You must implement __call__"
+msgstr ""
+
+#: cinder/api/direct.py:218
+msgid "not available"
+msgstr ""
+
+#: cinder/api/direct.py:299
+#, python-format
+msgid "Returned non-serializable type: %s"
+msgstr ""
+
+#: cinder/api/sizelimit.py:51
+msgid "Request is too large."
+msgstr ""
+
+#: cinder/api/validator.py:142
+#, python-format
+msgid "%(key)s with value %(value)s failed validator %(validator)s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:73
+#, python-format
+msgid "%(code)s: %(message)s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:95
+#, python-format
+msgid "FaultWrapper: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:170
+msgid "Too many failed authentications."
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:180
+#, python-format
+msgid ""
+"Access key %(access_key)s has had %(failures)d failed authentications and"
+" will be locked out for %(lock_mins)d minutes."
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:267
+msgid "Signature not provided"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:271
+msgid "Access key not provided"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319
+msgid "Failure communicating with keystone"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:388
+#, python-format
+msgid "Authentication Failure: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:404
+#, python-format
+msgid "Authenticated Request For %(uname)s:%(pname)s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:435
+#, python-format
+msgid "action: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:437
+#, python-format
+msgid "arg: %(key)s\t\tval: %(value)s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:512
+#, python-format
+msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:584
+#, python-format
+msgid "InstanceNotFound raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:590
+#, python-format
+msgid "VolumeNotFound raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:596
+#, python-format
+msgid "SnapshotNotFound raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:602
+#, python-format
+msgid "NotFound raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:605
+#, python-format
+msgid "EC2APIError raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:613
+#, python-format
+msgid "KeyPairExists raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:617
+#, python-format
+msgid "InvalidParameterValue raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:621
+#, python-format
+msgid "InvalidPortRange raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:625
+#, python-format
+msgid "NotAuthorized raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:629
+#, python-format
+msgid "InvalidRequest raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:633
+#, python-format
+msgid "QuotaError raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:637
+#, python-format
+msgid "Invalid id: bogus (expecting \"i-...\"): %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:646
+#, python-format
+msgid "Unexpected error raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:647
+#, python-format
+msgid "Environment: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248
+msgid "An unknown error has occurred. Please try your request again."
+msgstr ""
+
+#: cinder/api/ec2/apirequest.py:64
+#, python-format
+msgid "Unsupported API request: controller = %(controller)s, action = %(action)s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:336
+#, python-format
+msgid "Create snapshot of volume %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:372
+#, python-format
+msgid ""
+"Value (%s) for KeyName is invalid. Content limited to Alphanumeric "
+"characters, spaces, dashes, and underscores."
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:378
+#, python-format
+msgid "Value (%s) for KeyName is invalid. Length exceeds maximum of 255."
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041
+#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533
+#, python-format
+msgid "attribute not supported: %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1107
+#, python-format
+msgid "vol = %s\n"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1255
+msgid "Allocate address"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1267
+#, python-format
+msgid "Release address %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1272
+#, python-format
+msgid "Associate address %(public_ip)s to instance %(instance_id)s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1282
+#, python-format
+msgid "Disassociate address %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1308
+msgid "Image must be available"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1329
+msgid "Going to start terminating instances"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1343
+#, python-format
+msgid "Reboot instance %r"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1354
+msgid "Going to stop instances"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1365
+msgid "Going to start instances"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1455
+#, python-format
+msgid "De-registering image %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1471
+msgid "imageLocation is required"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1490
+#, python-format
+msgid "Registered image %(image_location)s with id %(image_id)s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1536
+msgid "user or group not specified"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1538
+msgid "only group \"all\" is supported"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1540
+msgid "operation_type must be add or remove"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1542
+#, python-format
+msgid "Updating image %s publicity"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1555
+#, python-format
+msgid "Not allowed to modify attributes for image %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1603
+#, python-format
+msgid "Couldn't stop instance within %d sec"
+msgstr ""
+
+#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:43
+#, python-format
+msgid "Caught error: %s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:94
+msgid "Must specify an ExtensionManager class"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:105
+#, python-format
+msgid "Extended resource: %s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:130
+#, python-format
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:135
+#, python-format
+msgid "Extension %(ext_name)s extending resource: %(collection)s"
+msgstr ""
+
+#: cinder/api/openstack/auth.py:90
+#, python-format
+msgid "%(user_id)s could not be found with token '%(token)s'"
+msgstr ""
+
+#: cinder/api/openstack/auth.py:134
+#, python-format
+msgid "%(user_id)s must be an admin or a member of %(project_id)s"
+msgstr ""
+
+#: cinder/api/openstack/auth.py:152
+msgid "Authentication requests must be made against a version root (e.g. /v2)."
+msgstr ""
+
+#: cinder/api/openstack/auth.py:167
+#, python-format
+msgid "Could not find %s in request."
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:527
+#, python-format
+msgid "Bad personality format: missing %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:530
+msgid "Bad personality format"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:535
+#, python-format
+msgid "Personality content for %s cannot be decoded"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:550
+#, python-format
+msgid "Bad networks format: network uuid is not in proper format (%s)"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:559
+#, python-format
+msgid "Invalid fixed IP address (%s)"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:566
+#, python-format
+msgid "Duplicate networks (%s) are not allowed"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:572
+#, python-format
+msgid "Bad network format: missing %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:575
+msgid "Bad networks format"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:587
+msgid "Userdata content cannot be decoded"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:594
+msgid "accessIPv4 is not in proper IPv4 format"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:601
+msgid "accessIPv6 is not in proper IPv6 format"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:633
+msgid "Server name is not defined"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:675
+#: cinder/api/openstack/compute/servers.py:740
+msgid "Invalid flavorRef provided."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:737
+msgid "Cannot find requested image"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:743
+msgid "Invalid key_name provided."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:829
+#: cinder/api/openstack/compute/servers.py:849
+msgid "Instance has not been resized."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:835
+#, python-format
+msgid "Error in confirm-resize %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:855
+#, python-format
+msgid "Error in revert-resize %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:868
+msgid "Argument 'type' for reboot is not HARD or SOFT"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:872
+msgid "Missing argument 'type' for reboot"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:885
+#, python-format
+msgid "Error in reboot %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:897
+msgid "Unable to locate requested flavor."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:900
+msgid "Resize requires a change in size."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:924
+msgid "Malformed server entity"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:931
+msgid "Missing imageRef attribute"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:940
+msgid "Invalid imageRef provided."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:949
+msgid "Missing flavorRef attribute"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:962
+msgid "No adminPass was specified"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:966
+#: cinder/api/openstack/compute/servers.py:1144
+msgid "Invalid adminPass"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:980
+msgid "Unable to parse metadata key/value pairs."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:993
+msgid "Resize request has invalid 'flavorRef' attribute."
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be 
specified."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:284
+#, python-format
+msgid "Live migration of instance %(id)s to host %(host)s failed"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:76
+#, python-format
+msgid ""
+"Cannot create aggregate with name %(name)s and availability zone "
+"%(avail_zone)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:88
+#, python-format
+msgid "Cannot show aggregate: %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:114
+#, python-format
+msgid "Cannot update aggregate: %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:126
+#, python-format
+msgid "Cannot delete aggregate: %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:139
+#, python-format
+msgid "Aggregate does not have %s action"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:152
+#: cinder/api/openstack/compute/contrib/aggregates.py:158
+#, python-format
+msgid "Cannot add host %(host)s to aggregate %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:171
+#: cinder/api/openstack/compute/contrib/aggregates.py:175
+#, python-format
+msgid "Cannot remove host %(host)s from aggregate %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:195
+#, python-format
+msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/certificates.py:75
+msgid "Only root certificate can be retrieved."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/cloudpipe.py:146
+msgid ""
+"Unable to claim IP for VPN instances, ensure it isn't running, and try "
+"again in a few minutes"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/consoles.py:44
+msgid "Missing type specification"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/consoles.py:56
+msgid "Invalid type specification"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/disk_config.py:44
+#, python-format
+msgid "%s must be either 'MANUAL' or 'AUTO'."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77
+#: cinder/api/openstack/compute/contrib/extended_status.py:61
+msgid "Server not found."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/flavorextradata.py:61
+#: cinder/api/openstack/compute/contrib/flavorextradata.py:91
+msgid "Flavor not found."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49
+#: cinder/api/openstack/compute/contrib/volumetypes.py:158
+msgid "No Request Body"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:159
+#, python-format
+msgid "No more floating ips in pool %s."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:161
+msgid "No more floating ips available."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:201
+#: cinder/api/openstack/compute/contrib/floating_ips.py:230
+#: cinder/api/openstack/compute/contrib/security_groups.py:571
+#: cinder/api/openstack/compute/contrib/security_groups.py:604
+msgid "Missing parameter dict"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:204
+#: cinder/api/openstack/compute/contrib/floating_ips.py:233
+msgid "Address not specified"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:213
+msgid "No fixed ips associated with instance"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:216
+msgid "Associate floating ip failed"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:144
+#, python-format
+msgid "Invalid status: '%s'"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:148
+#, python-format
+msgid "Invalid mode: '%s'"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:152
+#, python-format
+msgid "Invalid update setting: '%s'"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:170
+#, python-format
+msgid "Putting host %(host)s in maintenance mode %(mode)s."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:181
+#, python-format
+msgid "Setting host %(host)s to %(state)s."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:230
+msgid "Describe-resource is admin-only functionality"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:238
+msgid "Host not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:70
+msgid "Keypair name contains unsafe characters"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:95
+msgid "Keypair name must be between 1 and 255 characters long"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:100
+#, python-format
+msgid "Key pair '%s' already exists."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/multinic.py:52
+msgid "Missing 'networkId' argument for addFixedIp"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/multinic.py:68
+msgid "Missing 'address' argument for removeFixedIp"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/multinic.py:77
+#, python-format
+msgid "Unable to find address %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:62
+#, python-format
+msgid "Network does not have %s action"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:70
+#, python-format
+msgid "Disassociating network with id %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:74
+#: cinder/api/openstack/compute/contrib/networks.py:91
+#: cinder/api/openstack/compute/contrib/networks.py:101
+msgid "Network not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:87
+#, python-format
+msgid "Showing network with id %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:97
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41
+msgid "Malformed scheduler_hints attribute"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:222
+msgid "Security group id should be an integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:243
+msgid "Security group is still in use"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:295
+#, python-format
+msgid "Security group %s already exists"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:315
+#, python-format
+msgid "Security group %s is not a string or unicode"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:318
+#, python-format
+msgid "Security group %s cannot be empty."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:321
+#, python-format
+msgid "Security group %s should not be greater than 255 characters."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:348
+msgid "Parent group id is not an integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:351
+#, python-format
+msgid "Security group (%s) not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:369
+msgid "Not enough parameters to build a valid rule."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:376
+#, python-format
+msgid "This rule already exists in group %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:414
+msgid "Parent or group id is not an integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:507
+msgid "Rule id is not an integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:510
+#, python-format
+msgid "Rule (%s) not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:574
+#: cinder/api/openstack/compute/contrib/security_groups.py:607
+msgid "Security group not specified"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:578
+#: cinder/api/openstack/compute/contrib/security_groups.py:611
+msgid "Security group name cannot be empty"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/server_start_stop.py:45
+#, python-format
+msgid "start instance %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/server_start_stop.py:54
+#, python-format
+msgid "stop instance %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:73
+#: cinder/api/openstack/volume/volumes.py:106
+#, python-format
+msgid "vol=%s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:146
+#: cinder/api/openstack/volume/volumes.py:184
+#, python-format
+msgid "Delete volume with id: %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:329
+#, python-format
+msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:481
+#: cinder/api/openstack/volume/snapshots.py:110
+#, python-format
+msgid "Delete snapshot with id: %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:524
+#: cinder/api/openstack/volume/snapshots.py:150
+#, python-format
+msgid "Create snapshot from volume %s"
+msgstr ""
+
+#: cinder/auth/fakeldap.py:33
+msgid "Attempted to instantiate singleton"
+msgstr ""
+
+#: cinder/auth/ldapdriver.py:650
+#, python-format
+msgid ""
+"Attempted to remove the last member of a group. Deleting the group at %s "
+"instead."
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance 
%s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +msgid "Going to try to soft delete instance" +msgstr "" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +msgid "Going to try to terminate instance" +msgstr "" + +#: cinder/compute/api.py:977 +msgid "Going to try to stop instance" +msgstr "" + +#: cinder/compute/api.py:996 +msgid "Going to try to start instance" +msgstr "" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr ""
+
+#: cinder/compute/manager.py:1064
+msgid "Error setting admin password"
+msgstr ""
+
+#: cinder/compute/manager.py:1079
+#, python-format
+msgid ""
+"trying to inject a file into a non-running instance: %(instance_uuid)s "
+"(state: %(current_power_state)s expected: %(expected_state)s)"
+msgstr ""
+
+#: cinder/compute/manager.py:1084
+#, python-format
+msgid "instance %(instance_uuid)s: injecting file to %(path)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1098
+#, python-format
+msgid ""
+"trying to update agent on a non-running instance: %(instance_uuid)s "
+"(state: %(current_power_state)s expected: %(expected_state)s)"
+msgstr ""
+
+#: cinder/compute/manager.py:1103
+#, python-format
+msgid "instance %(instance_uuid)s: updating agent to %(url)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1116
+#, python-format
+msgid "instance %s: rescuing"
+msgstr ""
+
+#: cinder/compute/manager.py:1141
+#, python-format
+msgid "instance %s: unrescuing"
+msgstr ""
+
+#: cinder/compute/manager.py:1270
+msgid "destination same as source!"
+msgstr ""
+
+#: cinder/compute/manager.py:1287
+#, python-format
+msgid "instance %s: migrating"
+msgstr ""
+
+#: cinder/compute/manager.py:1471
+#, python-format
+msgid "instance %s: pausing"
+msgstr ""
+
+#: cinder/compute/manager.py:1489
+#, python-format
+msgid "instance %s: unpausing"
+msgstr ""
+
+#: cinder/compute/manager.py:1525
+#, python-format
+msgid "instance %s: retrieving diagnostics"
+msgstr ""
+
+#: cinder/compute/manager.py:1534
+#, python-format
+msgid "instance %s: suspending"
+msgstr ""
+
+#: cinder/compute/manager.py:1556
+#, python-format
+msgid "instance %s: resuming"
+msgstr ""
+
+#: cinder/compute/manager.py:1579
+#, python-format
+msgid "instance %s: locking"
+msgstr ""
+
+#: cinder/compute/manager.py:1588
+#, python-format
+msgid "instance %s: unlocking"
+msgstr ""
+
+#: cinder/compute/manager.py:1596
+#, python-format
+msgid "instance %s: getting locked state"
+msgstr ""
+
+#: cinder/compute/manager.py:1606
+#, python-format
+msgid "instance %s: reset network"
+msgstr ""
+
+#: cinder/compute/manager.py:1614
+#, python-format
+msgid "instance %s: inject network info"
+msgstr ""
+
+#: cinder/compute/manager.py:1618
+#, python-format
+msgid "network_info to inject: |%s|"
+msgstr ""
+
+#: cinder/compute/manager.py:1655
+#, python-format
+msgid "instance %s: getting vnc console"
+msgstr ""
+
+#: cinder/compute/manager.py:1685
+#, python-format
+msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1703
+#, python-format
+msgid ""
+"instance %(instance_uuid)s: attaching volume %(volume_id)s to "
+"%(mountpoint)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1705
+#, python-format
+msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1714
+#, python-format
+msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing"
+msgstr ""
+
+#: cinder/compute/manager.py:1724
+#, python-format
+msgid "Attach failed %(mountpoint)s, removing"
+msgstr ""
+
+#: cinder/compute/manager.py:1752
+#, python-format
+msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1756
+#, python-format
+msgid "Detaching volume from unknown instance %s"
+msgstr ""
+
+#: cinder/compute/manager.py:1822
+#, python-format
+msgid ""
+"Creating tmpfile %s to notify other compute nodes that they should "
+"mount the same storage."
+msgstr ""
+
+#: cinder/compute/manager.py:1884
+msgid "Instance has no volume."
+msgstr ""
+
+#: cinder/compute/manager.py:1916
+#, python-format
+msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d for %(hostname)s."
+msgstr ""
+
+#: cinder/compute/manager.py:1973
+#, python-format
+msgid "Pre live migration failed at %(dest)s"
+msgstr ""
+
+#: cinder/compute/manager.py:2000
+msgid "post_live_migration() is started..."
+msgstr ""
+
+#: cinder/compute/manager.py:2030
+msgid "No floating_ip found"
+msgstr ""
+
+#: cinder/compute/manager.py:2038
+msgid "No floating_ip found."
+msgstr ""
+
+#: cinder/compute/manager.py:2040
+#, python-format
+msgid ""
+"Live migration: Unexpected error: cannot inherit floating ip.\n"
+"%(e)s"
+msgstr ""
+
+#: cinder/compute/manager.py:2073
+#, python-format
+msgid "Migrating instance to %(dest)s finished successfully."
+msgstr ""
+
+#: cinder/compute/manager.py:2075
+msgid ""
+"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
+"with matching name.\" This error can be safely ignored."
+msgstr ""
+
+#: cinder/compute/manager.py:2090
+msgid "Post operation of migration started"
+msgstr ""
+
+#: cinder/compute/manager.py:2226
+#, python-format
+msgid "Updated the info_cache for instance %s"
+msgstr ""
+
+#: cinder/compute/manager.py:2255
+msgid "Updating bandwidth usage cache"
+msgstr ""
+
+#: cinder/compute/manager.py:2277
+msgid "Updating host status"
+msgstr ""
+
+#: cinder/compute/manager.py:2305
+#, python-format
+msgid ""
+"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
+"the hypervisor."
+msgstr ""
+
+#: cinder/compute/manager.py:2331
+#, python-format
+msgid ""
+"During the sync_power process the instance %(uuid)s has moved from host "
+"%(src)s to host %(dst)s"
+msgstr ""
+
+#: cinder/compute/manager.py:2344
+#, python-format
+msgid ""
+"Instance %s is in the process of migrating to this host. Wait for the "
+"next sync_power cycle before setting power state to NOSTATE"
+msgstr ""
+
+#: cinder/compute/manager.py:2350
+msgid ""
+"Instance found in database but not known by hypervisor. Setting power "
+"state to NOSTATE"
+msgstr ""
+
+#: cinder/compute/manager.py:2380
+msgid "FLAGS.reclaim_instance_interval <= 0, skipping..."
+msgstr ""
+
+#: cinder/compute/manager.py:2392
+msgid "Reclaiming deleted instance"
+msgstr ""
+
+#: cinder/compute/manager.py:2458
+#, python-format
+msgid ""
+"Detected instance with name label '%(name)s' which is marked as DELETED "
+"but still present on host."
+msgstr ""
+
+#: cinder/compute/manager.py:2465
+#, python-format
+msgid ""
+"Destroying instance with name label '%(name)s' which is marked as DELETED"
+" but still present on host."
+msgstr ""
+
+#: cinder/compute/manager.py:2472
+#, python-format
+msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action"
+msgstr ""
+
+#: cinder/compute/manager.py:2542
+#, python-format
+msgid ""
+"Aggregate %(aggregate_id)s: unrecoverable state during operation on "
+"%(host)s"
+msgstr ""
+
+#: cinder/compute/utils.py:142
+msgid "v4 subnets are required for legacy nw_info"
+msgstr ""
+
+#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70
+msgid "Adding console"
+msgstr ""
+
+#: cinder/console/manager.py:97
+#, python-format
+msgid "Tried to remove non-existent console %(console_id)s."
+msgstr ""
+
+#: cinder/console/vmrc_manager.py:122
+#, python-format
+msgid "Tried to remove non-existent console %(console_id)s."
+msgstr ""
+
+#: cinder/console/vmrc_manager.py:125
+#, python-format
+msgid "Removing console %(console_id)s."
+msgstr ""
+
+#: cinder/console/xvp.py:98
+msgid "Rebuilding xvp conf"
+msgstr ""
+
+#: cinder/console/xvp.py:116
+#, python-format
+msgid "Re-wrote %s"
+msgstr ""
+
+#: cinder/console/xvp.py:121
+msgid "Stopping xvp"
+msgstr ""
+
+#: cinder/console/xvp.py:134
+msgid "Starting xvp"
+msgstr ""
+
+#: cinder/console/xvp.py:141
+#, python-format
+msgid "Error starting xvp: %s"
+msgstr ""
+
+#: cinder/console/xvp.py:144
+msgid "Restarting xvp"
+msgstr ""
+
+#: cinder/console/xvp.py:146
+msgid "xvp not running..."
+msgstr ""
+
+#: cinder/consoleauth/manager.py:63
+#, python-format
+msgid "Deleting Expired Token: (%s)"
+msgstr ""
+
+#: cinder/consoleauth/manager.py:75
+#, python-format
+msgid "Received Token: %(token)s, %(token_dict)s"
+msgstr ""
+
+#: cinder/consoleauth/manager.py:79
+#, python-format
+msgid "Checking Token: %(token)s, %(token_valid)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:57
+msgid "Use of empty request context is deprecated"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:198
+#, python-format
+msgid "Unrecognized read_deleted value '%s'"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551
+#, python-format
+msgid "No ComputeNode for %(host)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045
+#, python-format
+msgid "No backend config with id %(sm_backend_id)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:4103
+#, python-format
+msgid "No sm_flavor called %(sm_flavor)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:4147
+#, python-format
+msgid "No sm_volume with id %(volume_id)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migration.py:66
+msgid "python-migrate is not installed. Exiting."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migration.py:78
+msgid "version should be an integer"
+msgstr ""
+
+#: cinder/db/sqlalchemy/session.py:137
+#, python-format
+msgid "SQL connection failed. %s attempts left."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48
+msgid "interface column not added to networks table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80
+#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54
+#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61
+#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48
+#, python-format
+msgid "Table |%s| not created!"
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: cinder/network/linux_net.py:1142 +#, python-format +msgid "Starting bridge %s " +msgstr "" + +#: cinder/network/linux_net.py:1149 +#, python-format +msgid "Done starting bridge %s" +msgstr "" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: cinder/virt/connection.py:85 +msgid "Failed to 
open connection to the hypervisor"
+msgstr ""
+
+#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396
+#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045
+#, python-format
+msgid "Compute_service record created for %s "
+msgstr ""
+
+#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399
+#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048
+#, python-format
+msgid "Compute_service record updated for %s "
+msgstr ""
+
+#: cinder/virt/firewall.py:130
+#, python-format
+msgid "Attempted to unfilter instance %s which is not filtered"
+msgstr ""
+
+#: cinder/virt/firewall.py:137
+#, python-format
+msgid "Filters added to instance %s"
+msgstr ""
+
+#: cinder/virt/firewall.py:139
+msgid "Provider Firewall Rules refreshed"
+msgstr ""
+
+#: cinder/virt/firewall.py:291
+#, python-format
+msgid "Adding security group rule: %r"
+msgstr ""
+
+#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87
+#, python-format
+msgid "Adding provider rule: %s"
+msgstr ""
+
+#: cinder/virt/images.py:86
+msgid "'qemu-img info' parsing failed."
+msgstr ""
+
+#: cinder/virt/images.py:92
+#, python-format
+msgid "fmt=%(fmt)s backed by: %(backing_file)s"
+msgstr ""
+
+#: cinder/virt/images.py:104
+#, python-format
+msgid "Converted to raw, but format is now %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:105
+msgid ""
+"Must specify vmwareapi_host_ip, vmwareapi_host_username and "
+"vmwareapi_host_password to use connection_type=vmwareapi"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:276
+#, python-format
+msgid "In vmwareapi:_create_session, got this exception: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:359
+#, python-format
+msgid "In vmwareapi:_call_method, got this exception: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:398
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: success"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:404
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:409
+#, python-format
+msgid "In vmwareapi:_poll_task, got this error %s"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:140
+msgid ""
+"Must specify xenapi_connection_url, xenapi_connection_username "
+"(optionally), and xenapi_connection_password to use "
+"connection_type=xenapi"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472
+msgid "Could not determine iscsi initiator name"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:460
+msgid "Host startup on XenServer is not supported."
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:489
+msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:527
+msgid "Host is member of a pool, but DB says otherwise"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612
+#, python-format
+msgid "Got exception: %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:93
+msgid "No domains exist."
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy, python-format +msgid "Instance soft rebooted successfully." +msgstr "bind %s: slettet" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +msgid "Guest does not have a console available" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of 
%(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +msgid "Starting instance" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "bind %s: slettet" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr 
"" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/netapp.py:614 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/netapp.py:620 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/netapp.py:625 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." 
+msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." 
+msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." 
+#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Reconnected to queue" +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "response %s" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "" + +#~ msgid "message %s" +#~ msgstr "" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "" + +#~ msgid "Declaring queue %s" +#~ msgstr "" + +#~ msgid "Declaring exchange %s" +#~ msgstr "" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "" + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgstr "" + +#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgstr "" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "Volume status must be available" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/de/LC_MESSAGES/nova.po b/cinder/locale/de/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..b9714c3e7cc --- /dev/null +++ b/cinder/locale/de/LC_MESSAGES/nova.po @@ -0,0 +1,8208 @@ +# German translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2011-08-23 11:23+0000\n" +"Last-Translator: Thierry Carrez \n" +"Language-Team: German \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "Dateiname der Root CA" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Dateiname des Private Key" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "Wo wir unsere Schlüssel aufbewahren" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "Soll eine eigenständige CA für jedes Projekt verwendet werden?" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Befehl: %(cmd)s\n" +"Exit-Code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." 
+msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +msgid "Failed to terminate instance" +msgstr "" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." 
+msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. 
This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." 
+msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." 
+msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." 
+msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "Das Service-Datenbank-Objekt ist verschwunden, es wird erneut erzeugt." + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "Alle vorhandenen FLAGS:" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "Hole %s" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Führe Kommando (subprocess) aus: %s" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "Ergebnis war %s" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." +msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." 
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be 
specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, python-format +msgid "Invalid mode: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "Verwende Project-Name = User-Name (%s)" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format 
+msgid "Unable to find host for Instance %s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +msgid "Going to try to soft delete instance" +msgstr "" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +msgid "Going to try to terminate instance" +msgstr "" + +#: cinder/compute/api.py:977 +msgid "Going to try to stop instance" +msgstr "" + +#: cinder/compute/api.py:996 +msgid "Going to try to start instance" +msgstr "" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "Instanz wurde bereits erstellt" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "Instanz %s: Rettung" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "Instanz %s pausiert" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "Instanz %s wird fortgesetzt" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." 
+msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." 
+msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." +msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: cinder/network/linux_net.py:1142 +#, python-format +msgid "Starting bridge %s " +msgstr "" + +#: cinder/network/linux_net.py:1149 +#, python-format +msgid "Done starting bridge %s" +msgstr "" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID ist %s" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: cinder/virt/connection.py:85 +msgid "Failed to 
open connection to the hypervisor"
+msgstr "Konnte Verbindung zum Hypervisor nicht öffnen"
+
+#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396
+#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045
+#, python-format
+msgid "Compute_service record created for %s "
+msgstr ""
+
+#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399
+#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048
+#, python-format
+msgid "Compute_service record updated for %s "
+msgstr ""
+
+#: cinder/virt/firewall.py:130
+#, python-format
+msgid "Attempted to unfilter instance %s which is not filtered"
+msgstr ""
+
+#: cinder/virt/firewall.py:137
+#, python-format
+msgid "Filters added to instance %s"
+msgstr ""
+
+#: cinder/virt/firewall.py:139
+msgid "Provider Firewall Rules refreshed"
+msgstr ""
+
+#: cinder/virt/firewall.py:291
+#, python-format
+msgid "Adding security group rule: %r"
+msgstr ""
+
+#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87
+#, python-format
+msgid "Adding provider rule: %s"
+msgstr ""
+
+#: cinder/virt/images.py:86
+msgid "'qemu-img info' parsing failed."
+msgstr ""
+
+#: cinder/virt/images.py:92
+#, python-format
+msgid "fmt=%(fmt)s backed by: %(backing_file)s"
+msgstr ""
+
+#: cinder/virt/images.py:104
+#, python-format
+msgid "Converted to raw, but format is now %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:105
+msgid ""
+"Must specify vmwareapi_host_ip, vmwareapi_host_username and "
+"vmwareapi_host_password to use connection_type=vmwareapi"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:276
+#, python-format
+msgid "In vmwareapi:_create_session, got this exception: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:359
+#, python-format
+msgid "In vmwareapi:_call_method, got this exception: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:398
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: success"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:404
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:409
+#, python-format
+msgid "In vmwareapi:_poll_task, got this error %s"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:140
+msgid ""
+"Must specify xenapi_connection_url, xenapi_connection_username "
+"(optionally), and xenapi_connection_password to use "
+"connection_type=xenapi"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472
+msgid "Could not determine iscsi initiator name"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:460
+msgid "Host startup on XenServer is not supported."
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:489
+msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:527
+msgid "Host is a member of a pool, but DB says otherwise"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612
+#, python-format
+msgid "Got exception: %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:93
+msgid "No domains exist."
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy, python-format +msgid "Instance soft rebooted successfully." +msgstr "Volume %s: erfolgreich erstellt" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +msgid "Guest does not have a console available" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of 
%(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +msgid "Starting instance" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "Nicht möglich Volumen zur Instanze %s hinzuzufügen" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Nicht möglich Volumen zur Instanze %s hinzuzufügen" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "Einhängepunkt%(mountpoint)s zur Instanze %(instance_name)s hinzugefügt" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "Volume %s: wird erstellt" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "Volume %s: erstelle Export" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "Volume %s: erfolgreich erstellt" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "Volume %s: entferne Export" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "Volume %s: wird entfernt" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "Volume %s: erfolgreich entfernt" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: 
cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Nicht möglich volume %s zufinden" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/netapp.py:614 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/netapp.py:620 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/netapp.py:625 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." 
+msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." 
+msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." 
+#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Reconnected to queue" +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "response %s" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "Betreff ist %s" + +#~ msgid "message %s" +#~ msgstr "Nachricht %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "" + +#~ msgid "Declaring queue %s" +#~ msgstr "" + +#~ msgid "Declaring exchange %s" +#~ msgstr "" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "" + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgstr "" + +#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgstr "" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "Volume status must be available" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/en_AU/LC_MESSAGES/nova.po b/cinder/locale/en_AU/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..b9709b31715 --- /dev/null +++ b/cinder/locale/en_AU/LC_MESSAGES/nova.po @@ -0,0 +1,8209 @@ +# English (Australia) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2011-10-21 11:27+0000\n" +"Last-Translator: Tom Fifield \n" +"Language-Team: English (Australia) \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "Filename of root CA" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Filename of private key" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "Where we keep our keys" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "Where we keep our root CA" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "Should we use a CA for each project?" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "Subject for certificate for users, %s for project, user, timestamp" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Subject for certificate for projects, %s for project, timestamp" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "Flags path: %s" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Unexpected error while running command." + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "no method for message: %s" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." 
+msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +#, fuzzy +msgid "Failed to terminate instance" +msgstr "Going to start terminating instances" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." 
+msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." 
+msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." 
+msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "User %(uid)s is already a member of the group %(group_dn)s" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." 
+msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." 
+msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, fuzzy, python-format +msgid "Could not fetch image %(image)s" +msgstr "Could not attach image to loopback: %s" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "Starting %(topic)s node (version %(vcs_string)s)" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "Service killed that has no database entry" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "The service database object disappeared, Recreating it." + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "Recovered model server connection!" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "model server went away" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "Inner Exception: %s" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "Fetching %s" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "Result was %s" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Link Local address is not found.:%s" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." 
+msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "You must implement __call__" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "not available" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "Too many failed authentications." + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." +msgstr "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "Authentication Failure: %s" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "Authenticated Request For %(uname)s:%(pname)s)" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "action: %s" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "arg: %(key)s\t\tval: %(value)s" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "Unauthorised request for controller=%(controller)s and action=%(action)s" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "InstanceNotFound raised: %s" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "VolumeNotFound raised: %s" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "NotFound raised: %s" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, fuzzy, python-format +msgid "QuotaError raised: %s" +msgstr "Unexpected error raised: %s" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, 
python-format +msgid "Unexpected error raised: %s" +msgstr "Unexpected error raised: %s" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "An unknown error has occurred. Please try your request again." + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "Unsupported API request: controller = %(controller)s, action = %(action)s" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "Create key pair %s" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "Delete key pair %s" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revoke security group ingress %s" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, fuzzy, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "Not enough parameters to build a valid rule." + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "No rule for the specified parameters." + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Authorise security group ingress %s" + +#: cinder/api/ec2/cloud.py:725 +#, fuzzy, python-format +msgid "%s - This rule already exists in group" +msgstr "This rule already exists in group %s" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "Create Security Group %s" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "group %s already exists" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "Delete security group %s" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "Get console output for instance %s" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume of %s GB" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "Detach volume %s" + +#: cinder/api/ec2/cloud.py:959 +#, fuzzy, python-format +msgid "Detach Volume Failed." +msgstr "Detach volume %s" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "attribute not supported: %s" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "Allocate address" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "Release address %s" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "Associate address %(public_ip)s to instance %(instance_id)s" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "Disassociate address %s" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "Going to start terminating instances" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "Reboot instance %r" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "De-registering image %s" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "Registered image %(image_location)s with id %(image_id)s" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "user or group not specified" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "only group \"all\" is supported" + +#: cinder/api/ec2/cloud.py:1540 
+msgid "operation_type must be add or remove" +msgstr "operation_type must be add or remove" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "Updating image %s publicity" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "Caught error: %s" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." +msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." 
+msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." +msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: 
cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be specified." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, fuzzy, python-format +msgid "Invalid mode: '%s'" +msgstr "Invalid backend: %s" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +#, fuzzy, python-format +msgid "Security group is still in use" +msgstr "Revoke security group ingress %s" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." +msgstr "Not enough parameters to build a valid rule." 
+ +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "This rule already exists in group %s" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "Attempted to instantiate singleton" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+ +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "Looking up user: %r" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Failed authorisation for access key %s" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "Using project name = user name (%s)" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "failed authorisation: no project named %(pjid)s (user=%(uname)s)" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" +"Failed authorisation: user %(uname)s not admin and not member of project " +"%(pjname)s" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Invalid signature for user %s" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "Must specify project" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "Adding role %(role)s to user %(uid)s in project %(pid)s" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "Adding sitewide role %(role)s to user %(uid)s" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "Removing role %(role)s from user %(uid)s on project %(pid)s" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "Removing sitewide role %(role)s from user %(uid)s" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "Created project %(name)s with manager %(manager_user)s" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "modifying project %s" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "Adding user %(uid)s to project %(pid)s" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "Remove user %(uid)s from project %(pid)s" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "Deleting project %s" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "Created user %(rvname)s (admin: %(rvadmin)r)" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "Deleting user %s" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "Access Key change for user %s" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "Secret Key change for user %s" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set 
to %(admin)r for user %(uid)s" +msgstr "Admin status set to %(admin)r for user %(uid)s" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "No vpn data for project %s" + +#: cinder/cloudpipe/pipelib.py:46 +#, fuzzy, python-format +msgid "Instance type for vpn instances" +msgstr "Get console output for instance %s" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "Network to push into openvpn config" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "Netmask to push into openvpn config" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "Launching VPN for %s" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance %s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +#, fuzzy, python-format +msgid "Cannot run any more instances of this type." +msgstr "Instance quota exceeded. You can only run %s more instances of this type." + +#: cinder/compute/api.py:259 +#, fuzzy, python-format +msgid "Can only run %s more instances of this type." +msgstr "Instance quota exceeded. You can only run %s more instances of this type." + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "Creating a raw instance" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "Going to run %s instances..." + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +#, fuzzy, python-format +msgid "Going to try to soft delete instance" +msgstr "Going to try to terminate %s" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +#, fuzzy, python-format +msgid "Going to try to terminate instance" +msgstr "Going to try to terminate %s" + +#: cinder/compute/api.py:977 +#, fuzzy, python-format +msgid "Going to try to stop instance" +msgstr "Going to try to terminate %s" + +#: cinder/compute/api.py:996 +#, fuzzy, python-format +msgid "Going to try to start instance" +msgstr "Going to try to terminate %s" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. 
(%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." +msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: executing: |%s|" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: not executing |%s|" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, fuzzy, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "After terminating instances: %s" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." 
+msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "Instance has already been created" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." +msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "Rebooting instance %s" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "instance %s: snapshotting" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "instance %s: rescuing" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "instance %s: unrescuing" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "instance %s: pausing" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "instance %s: unpausing" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instance %s: retrieving diagnostics" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "instance %s: suspending" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "instance %s: resuming" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "instance %s: locking" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "instance %s: unlocking" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "instance %s: getting locked state" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "instance %s: reset network" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" 
+msgstr "Detaching volume from unknown instance %s" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." 
+msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "Adding console" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "Tried to remove non-existent console %(console_id)s." + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." +msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "Rebuilding xvp conf" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "Re-wrote %s" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "Stopping xvp" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "Starting xvp" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "Error starting xvp: %s" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "Restarting xvp" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "xvp not running..." + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "python-migrate is not installed. Exiting." + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "Failed to decrypt private key: %s" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "Failed to decrypt initialisation vector: %s" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "Failed to decrypt image file %(image_file)s: %(err)s" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "Hupping dnsmasq threw %s" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d is stale, relaunching dnsmasq" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "killing radvd threw %s" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "Pid %d is stale, relaunching radvd" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Starting VLAN inteface %s" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Starting Bridge interface for %s" + +#: cinder/network/linux_net.py:1142 +#, fuzzy, python-format +msgid "Starting bridge %s " +msgstr "Starting Bridge interface for %s" + +#: cinder/network/linux_net.py:1149 +#, fuzzy, python-format +msgid "Done starting bridge %s" +msgstr "Error starting xvp: %s" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "setting network host" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "IP %s released that was not leased" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Returning exception %s to caller" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "unpacked context: %s" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "received %s" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "no method for message: %s" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "No method for message: %s" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "Must implement a fallback schedule" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "Running instances: %s" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "After terminating instances: %s" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "start address" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "Target %s allocated" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "Nested received %(queue)s, %(value)s" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "Nested return %s" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr 
"Received %s" + +#: cinder/virt/connection.py:85 +msgid "Failed to open connection to the hypervisor" +msgstr "Failed to open connection to the hypervisor" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "Attempted to unfilter instance %s which is not filtered" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "Got exception: %s" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "instance %(instance_name)s: deleting instance files %(target)s" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "instance %s: rebooted" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "instance %s: rescued" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "instance %s: booted" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s 
~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "instance %s: Creating image" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "instance %s: starting toXML method" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "instance %s: finished toXML method" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "Failed to mount filesystem: %s" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." 
+msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." +msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "Could not attach image to loopback: %s" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "No free nbd devices" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "nbd device %s did not show up" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "Connecting to libvirt: %s" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "Connection to libvirt broke" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy, python-format +msgid "Instance soft rebooted successfully." +msgstr "instance %s: rebooted" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "data: %(data)r, fpath: %(fpath)r" + +#: cinder/virt/libvirt/connection.py:978 +msgid "Guest does not have a console available" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "Unable to find an open port" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "Raising NotImplemented" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake does not have an implementation for %s" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "Calling %(localname)s %(impl)s" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "Calling getter %s" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "Found non-unique network for bridge %s" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "Found no network for bridge %s" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "Unable to detach volume %s" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "VBD not found in instance %s" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, fuzzy, python-format +msgid "VBD %s already detached" +msgstr "group %s already exists" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Unable to unplug VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Unable to destroy VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, fuzzy, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, fuzzy, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "Snapshotting VM %(vm_ref)s with label '%(label)s'..." + +#: cinder/virt/xenapi/vm_utils.py:392 +#, fuzzy, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, fuzzy, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "Size for image %(image)s:%(virtual_size)d" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "Copying VDI %s to /boot/guest on dom0" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "Kernel/Ramdisk VDI %s destroyed" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Looking up vdi %s for PV kernel" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "VDI %s 
is still available" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver vm state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of %(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "Re-scanning SR %s" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "VHD %(vdi_uuid)s has parent %(parent_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "Plugging VBD %s ... " + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "Plugging VBD %s done." 
+ +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "Destroying VBD for VDI %s ... " + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "Destroying VBD for VDI %s done." + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "Running pygrub against %s" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "Found Xen kernel %s" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." +msgstr "No Xen kernel found. Booting HVM." + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "Writing partition table %s done." + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +#, fuzzy +msgid "Starting instance" +msgstr "Creating a raw instance" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "Injecting file path: '%s'" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +#, fuzzy +msgid "Starting VM" +msgstr "Restarting xvp" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +#, fuzzy, python-format +msgid "Finished snapshot and upload for VM" +msgstr "Finished snapshot and upload for VM %s" + +#: cinder/virt/xenapi/vmops.py:677 +#, fuzzy, python-format +msgid "Starting snapshot for VM" +msgstr "Starting snapshot for VM %s" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "Unable to attach volume to instance %s" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "kernel/ramdisk files removed" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +#, fuzzy +msgid "Destroying VM" +msgstr "Restarting xvp" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +#, fuzzy +msgid "Injecting network info to xenstore" +msgstr "setting network host" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, fuzzy, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1495 +#, fuzzy, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "OpenSSL error: %s" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "Unable to create Storage Repository" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "Introduced %(label)s as %(sr_ref)s." + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "Introducing %s..." 
+ +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "Unable to find SR from VBD %s" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "Unable to introduce VDI on SR %s" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "Unable to get record of VDI %s on" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "Unable to introduce VDI for SR %s" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "Mountpoint cannot be translated: %s" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Unable to use SR %(sr_ref)s for instance %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Unable to attach volume to instance %s" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Unable to locate volume %s" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Unable to detach volume %s" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: 
cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recovering from a failed execute. Try number %s" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "volume group %s doesn't exist" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAKE ISCSI: %s" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd has no pool %s" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog is not working: %s" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "Sheepdog is not working" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: creating logical volume of size %(vol_size)sG" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: created successfully" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "Volume is still attached" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "Volume is not local to this node" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: removing export" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/manager.py:183 +#, python-format +msgid 
"snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instance %s: snapshotting" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconnected to queue" + +#: cinder/volume/netapp.py:159 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "NotFound raised: %s" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Could not find iSCSI export for volume %s" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/netapp.py:614 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/netapp.py:620 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/netapp.py:625 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Failed to get metadata for ip: %s" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" 
+msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "response %s" + +#: cinder/volume/nexenta/volume.py:96 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "volume group %s doesn't exist" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." +msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "trying to destroy already destroyed instance: %s" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "Initing the Adapter Consumer for %s" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "topic is %s" + +#~ msgid "message %s" +#~ msgstr "message %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "(%(nm)s) publish (key: %(routing_key)s) %(message)s" + +#~ msgid "Publishing to route %s" +#~ msgstr "Publishing to route %s" + +#~ msgid "Declaring queue %s" +#~ msgstr "Declaring queue %s" + +#~ msgid "Declaring exchange %s" +#~ msgstr "Declaring exchange %s" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "Binding %(queue)s to %(exchange)s with key %(routing_key)s" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "Getting from %(queue)s: %(message)s" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "Task [%(name)s] %(task)s status: success %(result)s" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "virsh said: %r" + +#~ msgid "cool, it's a device" +#~ msgstr "cool, it's a device" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "Created VM %s..." + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "Created VM %(instance_name)s as %(vm_ref)s." + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "Creating VBD for VDI %s ... " + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "Creating VBD for VDI %s done." + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "VBD.unplug successful first time." + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "VBD.unplug rejected: retrying..." + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "VBD.unplug successful eventually." + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "Ignoring XenAPI.Failure in VBD.unplug: %s" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "Ignoring XenAPI.Failure %s" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "instance %s: Failed to spawn" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "Starting VM %s..." + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/en_GB/LC_MESSAGES/nova.po b/cinder/locale/en_GB/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..cb8d062f318 --- /dev/null +++ b/cinder/locale/en_GB/LC_MESSAGES/nova.po @@ -0,0 +1,8209 @@ +# English (United Kingdom) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-03-30 11:10+0000\n" +"Last-Translator: Anthony Harrington \n" +"Language-Team: English (United Kingdom) \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "Filename of root CA" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Filename of private key" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "Filename of root Certificate Revocation List" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "Where we keep our keys" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "Where we keep our root CA" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "Should we use a CA for each project?" 
+ +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "Subject for certificate for users, %s for project, user, timestamp" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Subject for certificate for projects, %s for project, timestamp" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "Flags path: %s" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Unexpected error while running command." + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "DB exception wrapped." + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "An unknown exception occurred." + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "Failed to decrypt text" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "Failed to paginate through images from image service" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "Virtual Interface creation failed" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "5 attempts to create virtual interface with unique mac address failed" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "Connection to glance failed" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "Connection to melange failed" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "Not authorised." + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "User does not have admin privileges" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Policy doesn't allow %(action)s to be performed." + +#: cinder/exception.py:216 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "Unacceptable parameters." + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "Invalid snapshot" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "Volume %(volume_id)s is not attached to anything" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "Keypair data is invalid" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "Failed to load data into json format" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "The request is invalid." + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "Invalid signature %(signature)s for user %(user)s." + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "Invalid input received" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "Invalid instance type %(instance_type)s." 
+ +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "Invalid volume type" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "Invalid volume" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "Invalid IP protocol %(protocol)s." + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Invalid content type %(content_type)s." + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "Invalid cidr %(cidr)s." + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "Invalid reuse of an RPC connection." + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "Instance %(instance_id)s is not running." + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +msgid "Failed to terminate instance" +msgstr "" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." 
+msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." 
+msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." 
+msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." 
+msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." 
+msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "Full set of FLAGS:" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "Inner Exception: %s" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "Fetching %s" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Running cmd (subprocess): %s" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "Result was %s" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Running cmd (SSH): %s" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Link Local address is not found.:%s" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "Invalid backend: %s" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +#, fuzzy, python-format +msgid "Invalid CIDR" +msgstr "Invalid cidr %(cidr)s." 
+ +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "Get console output for instance %s" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." 
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: 
cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, fuzzy, python-format +msgid "Invalid mode: '%s'" +msgstr "Invalid backend: %s" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +#, fuzzy, python-format +msgid "Instance type for vpn instances" +msgstr "Invalid instance type %(instance_type)s." 
+
+#: cinder/cloudpipe/pipelib.py:49
+msgid "Template for cloudpipe instance boot script"
+msgstr ""
+
+#: cinder/cloudpipe/pipelib.py:52
+msgid "Network to push into openvpn config"
+msgstr ""
+
+#: cinder/cloudpipe/pipelib.py:55
+msgid "Netmask to push into openvpn config"
+msgstr ""
+
+#: cinder/cloudpipe/pipelib.py:107
+#, python-format
+msgid "Launching VPN for %s"
+msgstr ""
+
+#: cinder/compute/api.py:141
+msgid "No compute host specified"
+msgstr ""
+
+#: cinder/compute/api.py:144
+#, python-format
+msgid "Unable to find host for Instance %s"
+msgstr ""
+
+#: cinder/compute/api.py:192
+#, python-format
+msgid ""
+"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
+"properties"
+msgstr ""
+
+#: cinder/compute/api.py:203
+#, python-format
+msgid "Quota exceeded for %(pid)s, metadata property key or value too long"
+msgstr ""
+
+#: cinder/compute/api.py:257
+msgid "Cannot run any more instances of this type."
+msgstr ""
+
+#: cinder/compute/api.py:259
+#, python-format
+msgid "Can only run %s more instances of this type."
+msgstr ""
+
+#: cinder/compute/api.py:261
+#, python-format
+msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. "
+msgstr ""
+
+#: cinder/compute/api.py:310
+msgid "Creating a raw instance"
+msgstr ""
+
+#: cinder/compute/api.py:312
+#, python-format
+msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s"
+msgstr ""
+
+#: cinder/compute/api.py:383
+#, python-format
+msgid "Going to run %s instances..."
+msgstr ""
+
+#: cinder/compute/api.py:447
+#, python-format
+msgid "bdm %s"
+msgstr ""
+
+#: cinder/compute/api.py:474
+#, python-format
+msgid "block_device_mapping %s"
+msgstr ""
+
+#: cinder/compute/api.py:591
+#, python-format
+msgid "Sending create to scheduler for %(pid)s/%(uid)s's"
+msgstr ""
+
+#: cinder/compute/api.py:871
+msgid "Going to try to soft delete instance"
+msgstr ""
+
+#: cinder/compute/api.py:891
+msgid "No host for instance, deleting immediately"
+msgstr ""
+
+#: cinder/compute/api.py:939
+msgid "Going to try to terminate instance"
+msgstr ""
+
+#: cinder/compute/api.py:977
+msgid "Going to try to stop instance"
+msgstr ""
+
+#: cinder/compute/api.py:996
+msgid "Going to try to start instance"
+msgstr ""
+
+#: cinder/compute/api.py:1000
+#, python-format
+msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s)"
+msgstr ""
+
+#: cinder/compute/api.py:1071 cinder/volume/api.py:173
+#: cinder/volume/volume_types.py:64
+#, python-format
+msgid "Searching by: %s"
+msgstr ""
+
+#: cinder/compute/api.py:1201
+#, python-format
+msgid "Image type not recognized %s"
+msgstr ""
+
+#: cinder/compute/api.py:1369
+msgid "flavor_id is None. Assuming migration."
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: executing: |%s|" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: not executing |%s|" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "Instance has already been created" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "Rebooting instance %s" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "instance %s: snapshotting" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "instance %s: rescuing" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "instance %s: pausing" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instance %s: retrieving diagnostics" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "instance %s: suspending" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "instance %s: resuming" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "instance %s: locking" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "instance %s: unlocking" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "instance %s: getting locked state" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "instance %s: reset network" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "Detaching volume from unknown 
instance %s" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." 
+msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." +msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "Use of empty request context is deprecated" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "Hupping dnsmasq threw %s" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d is stale, relaunching dnsmasq" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "killing radvd threw %s" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "Pid %d is stale, relaunching radvd" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Starting VLAN inteface %s" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Starting Bridge interface for %s" + +#: cinder/network/linux_net.py:1142 +#, fuzzy, python-format +msgid "Starting bridge %s " +msgstr "Starting Bridge interface for %s" + +#: cinder/network/linux_net.py:1149 +#, python-format +msgid "Done starting bridge %s" +msgstr "" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "Nested received %(queue)s, %(value)s" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "Nested return %s" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr 
"Received %s" + +#: cinder/virt/connection.py:85 +msgid "Failed to open connection to the hypervisor" +msgstr "Failed to open connection to the hypervisor" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy, python-format +msgid "Instance soft rebooted successfully." +msgstr "volume %s: created successfully" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +#, fuzzy +msgid "Guest does not have a console available" +msgstr "User does not have admin privileges" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "Raising NotImplemented" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake does not have an implementation for %s" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "Calling %(localname)s %(impl)s" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "Calling getter %s" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "Unable to detach volume %s" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "VBD not found in instance %s" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Unable to unplug VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Unable to destroy VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, fuzzy, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, fuzzy, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "Snapshotting VM %(vm_ref)s with label '%(label)s'..." + +#: cinder/virt/xenapi/vm_utils.py:392 +#, fuzzy, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, fuzzy, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "Size for image %(image)s:%(virtual_size)d" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "Copying VDI %s to /boot/guest on dom0" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "Kernel/Ramdisk VDI %s destroyed" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Looking up vdi %s for PV kernel" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "VDI %s 
is still available" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver vm state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of %(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "Re-scanning SR %s" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "VHD %(vdi_uuid)s has parent %(parent_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "Plugging VBD %s ... " + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "Plugging VBD %s done." 
+ +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "Destroying VBD for VDI %s ... " + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "Destroying VBD for VDI %s done." + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "Running pygrub against %s" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "Found Xen kernel %s" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." +msgstr "No Xen kernel found. Booting HVM." + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "Writing partition table %s done." + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +#, fuzzy, python-format +msgid "Starting instance" +msgstr "Rebooting instance %s" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "Unable to attach volume to instance %s" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, fuzzy, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "Instance %(instance_id)s is not running." + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, fuzzy, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1495 +#, fuzzy, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Unable to use SR %(sr_ref)s for instance %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Unable to attach volume to instance %s" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Unable to locate volume %s" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Unable to detach volume %s" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: 
cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Volume status must be available" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exporting %s volumes" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: skipping export" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creating" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: creating lv of size %(vol_size)sG" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creating export" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: created successfully" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "Volume is still attached" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "Volume is not local to this node" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: removing export" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: deleting" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: deleted successfully" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: 
created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instance %s: snapshotting" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Unable to locate volume %s" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/netapp.py:614 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/netapp.py:620 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/netapp.py:625 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" 
+msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." +msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." +#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "trying to destroy already destroyed instance: %s" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Reconnected to queue" +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "response %s" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "" + +#~ msgid "message %s" +#~ msgstr "" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." 
+#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "(%(nm)s) publish (key: %(routing_key)s) %(message)s" + +#~ msgid "Publishing to route %s" +#~ msgstr "Publishing to route %s" + +#~ msgid "Declaring queue %s" +#~ msgstr "Declaring queue %s" + +#~ msgid "Declaring exchange %s" +#~ msgstr "Declaring exchange %s" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "Binding %(queue)s to %(exchange)s with key %(routing_key)s" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "Getting from %(queue)s: %(message)s" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "Created VM %s..." + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "Created VM %(instance_name)s as %(vm_ref)s." + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "Creating VBD for VDI %s ... " + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "Creating VBD for VDI %s done." + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "VBD.unplug successful first time." + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "VBD.unplug rejected: retrying..." + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "VBD.unplug successful eventually." + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "Ignoring XenAPI.Failure in VBD.unplug: %s" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "Ignoring XenAPI.Failure %s" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "instance %s: Failed to spawn" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/es/LC_MESSAGES/nova.po b/cinder/locale/es/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..836900e3517 --- /dev/null +++ b/cinder/locale/es/LC_MESSAGES/nova.po @@ -0,0 +1,8220 @@ +# Spanish translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-03-10 06:08+0000\n" +"Last-Translator: Oscar Rosario \n" +"Language-Team: Spanish \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "Nombre de fichero de la CA raíz" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Nombre de fichero de la clave privada" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "Donde guardamos nuestras claves" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "Dónde guardamos nuestra CA raíz" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "¿Deberíamos usar una CA para cada proyecto?" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" +"Sujeto (Subject) para el certificado de usuarios, %s para el proyecto, " +"usuario, marca de tiempo" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" +"Sujeto (Subject) para el certificado del proyecto, %s para el proyecto, " +"marca de tiempo" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "Ruta a las opciones: %s" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Error inesperado mientras el comando se ejecutaba" + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de salida: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "Una excepcion desconocida ha ocurrido" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "Fallo al desencriptar el texto" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "Creacion de interfaz virtual fallida" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "Coneccion con glance fallida" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "No Autorizado" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "El usuario no tiene privilegios de administrador" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." 
+msgstr "" + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "no hay método para el mensaje: %s" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "Parametros inaceptables" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "Captura no valida" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "Fallo al ingresar informacion en formato json" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "La petición es inválida." + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "Firma invalida %(signature)s para el usuario %(user)s" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "Entrada invalida recibida" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "Rango de puertos invalido %(from_port)s:%(to_port)s. %(msg)s" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "Protocolo IP invalido %(protocol)s" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Tipo de contenido invalido %(content_type)s." + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "Cidr %(cidr)s invalido" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "Reuso invalido de una coneccion RPC" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "La instacia %(instance_id)s no se esta ejecutando" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." 
+msgstr "La instacia %(instance_id)s no esta suspendida" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "La instancia %(instance_id)s no esta en modo de rescate" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "Fallo al suspender la instancia" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "Fallo al resumir el servidor" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "Fallo a reinicia la instancia" + +#: cinder/exception.py:334 +#, fuzzy +msgid "Failed to terminate instance" +msgstr "Fallo a reinicia la instancia" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "El servicio no esta disponible en este momento" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "El servicio de computo no esta disponible en este momento" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" +"Incapaz de emigrar la instancia %(instance_id)s al actual anfitrion " +"(%(host)s)" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "El destino del anfitrion de computo no esta disponible en este momento" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "El anfitrion de computo no esta disponible en este momento" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "Informacion del CPU inaceptable" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "%(address)s no es una direccion IP v4/6 valida" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "Formato de disco %(disk_format)s no es aceptable" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." 
+msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." 
+msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." 
+msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "El usuario %(uid)s es actualmente miembro del grupo %(group_dn)s" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." 
+msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, fuzzy, python-format +msgid "Could not fetch image %(image)s" +msgstr "No se puede unir la imagen con el loopback: %s" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." 
+msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "Se detuvo un servicio sin entrada en la base de datos" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "El servicio objeto de base de datos ha desaparecido, recreándolo." + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "Recuperada la conexión al servidor de modelos." + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "el servidor de modelos se ha ido" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de opciones (FLAGS):" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "Excepción interna: %s" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "Obteniendo %s" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Ejecutando cmd (subprocesos): %s" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "El resultado fue %s" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "corriendo cmd (SSH): %s" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "Depuración de la devolución de llamada: %s" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "No se encuentra la dirección del enlace local.:%s" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "backend inválido: %s" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "Demasiados intentos de autenticacion fallidos." + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "Fallo de autenticación: %s" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "acción: %s" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "No encontrado: %s" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, fuzzy, python-format +msgid "QuotaError raised: %s" +msgstr "Sucedió un error inexperado: %s" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "Sucedió un error inexperado: %s" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "Ha sucedido un error desconocido. Por favor repite el intento de nuevo." + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "Creando par de claves %s" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "Borrar para de claves %s" + +#: cinder/api/ec2/cloud.py:551 +#, fuzzy, python-format +msgid "Invalid CIDR" +msgstr "Cidr %(cidr)s invalido" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revocar ingreso al grupo de seguridad %s" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "No hay regla para los parámetros especificados." + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Autorizar ingreso al grupo de seguridad %s" + +#: cinder/api/ec2/cloud.py:725 +#, fuzzy, python-format +msgid "%s - This rule already exists in group" +msgstr "Esta regla ya existe en el grupo %s" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "Crear Grupo de Seguridad %s" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "el grupo %s ya existe" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "Borrar grupo de seguridad %s" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "Obtener salida de la consola para la instancia %s" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "Crear volumen de %s GB" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "Desasociar volumen %s" + +#: cinder/api/ec2/cloud.py:959 +#, fuzzy, python-format +msgid "Detach Volume Failed." 
+msgstr "Desasociar volumen %s" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "atributo no soportado: %s" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "Asignar dirección" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "Liberar dirección %s" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "Desasociar dirección %s" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "Se va a iniciar la finalización de las instancias" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "Reiniciar instancia %r" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "Des-registrando la imagen %s" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "usuario o grupo no especificado" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "sólo el grupo \"all\" está soportado" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "operation_type debe ser añadir o eliminar" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "Actualizando imagen %s públicamente" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado error: %s" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member 
of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." +msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: 
cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, fuzzy, python-format +msgid "Invalid mode: '%s'" +msgstr "backend inválido: %s" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +#, fuzzy, python-format +msgid "Security group is still in use" +msgstr "Revocar ingreso al grupo de seguridad %s" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Esta regla ya existe en el grupo %s" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "Intento de instanciar sigleton" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" +"Se ha intentado eliminar el último miembro de un grupo. Eliminando el " +"grupo %s en su lugar." 
+ +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "Buscando usuario: %r" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Fallo de autorización para la clave de acceso %s" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "Utilizando nombre de proyecto = nombre de usuario (%s)" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Firma inválida para el usuario %s" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "Debes especificar un proyecto" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "Creado el proyecto %(name)s con administrador %(manager_user)s" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "Modificando proyecto %s" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "Agregando usuario %(uid)s al proyecto %(pid)s" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "Borrar usuario %(uid)s del proyecto %(pid)s" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "Borrando proyecto %s" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "Borrando usuario %s" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "Cambio de clave de acceso para el usuario %s" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "Cambio de clave secreta para el usuario %s" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "No hay datos vpn para el proyecto %s" + +#: cinder/cloudpipe/pipelib.py:46 +#, fuzzy, python-format +msgid "Instance type for vpn instances" +msgstr "Obtener salida de 
la consola para la instancia %s" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "Red a insertar en la configuración de openvpn" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "Máscara de red a insertar en la configuración de openvpn" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "Lanzando VPN para %s" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance %s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +#, fuzzy, python-format +msgid "Cannot run any more instances of this type." +msgstr "" +"Cuota de instancias superada. Sólo puedes ejecutar %s instancias más de " +"este tipo." + +#: cinder/compute/api.py:259 +#, fuzzy, python-format +msgid "Can only run %s more instances of this type." +msgstr "" +"Cuota de instancias superada. Sólo puedes ejecutar %s instancias más de " +"este tipo." + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "Creando una instancia raw" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "Vamos a ejecutar %s instancias..." + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +#, fuzzy, python-format +msgid "Going to try to soft delete instance" +msgstr "Vamos a ejecutar %s instancias..." + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +#, fuzzy +msgid "Going to try to terminate instance" +msgstr "Se va a iniciar la finalización de las instancias" + +#: cinder/compute/api.py:977 +#, fuzzy, python-format +msgid "Going to try to stop instance" +msgstr "Vamos a ejecutar %s instancias..." + +#: cinder/compute/api.py:996 +#, fuzzy, python-format +msgid "Going to try to start instance" +msgstr "Vamos a ejecutar %s instancias..." + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration."
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: ejecutando: |%s|" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: no ejecutando |%s|" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, fuzzy, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "Después de terminar las instancias: %s" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "La instancia ha sido creada previamente" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "Reiniciando instancia %s" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "instancia %s: creando snapshot" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "instancia %s: rescatando" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "instancia %s: pausando" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "instancia %s: continuando tras pausa" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instancia %s: obteniendo los diagnosticos" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "instancia %s: suspendiendo" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "instancia %s: continuando" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "instancia %s: bloqueando" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "instancia %s: desbloqueando" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "instancia %s: pasando a estado bloqueado" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "instancia %s: reiniciar redes" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, fuzzy, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "La instancia %(instance_id)s no esta en modo de rescate" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: 
cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "Desvinculando volumen de instancia desconocida %s" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." 
+msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." +msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "El uso de una petición de contexto vacía está en desuso" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "Excepción al recargar la configuración de dnsmasq: %s" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "El pid %d está pasado, relanzando dnsmasq" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "Matando radvd lanzado %s" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "Pid %d corrupto, relanzando radvd" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Iniciando interfaz VLAN %s" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Iniciando interfaz puente para %s" + +#: cinder/network/linux_net.py:1142 +#, fuzzy, python-format +msgid "Starting bridge %s " +msgstr "Iniciando interfaz puente para %s" + +#: cinder/network/linux_net.py:1149 +#, fuzzy, python-format +msgid "Done starting bridge %s" +msgstr "Des-registrando la imagen %s" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "configurando la red del host" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "contenido desempaquetado: %s" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "recibido %s" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "no hay método para el mensaje: %s" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "No hay método para el mensaje: %s" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID es %s" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "Debe de implementar un horario de reserva" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, fuzzy, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "La instacia %(instance_id)s no esta suspendida" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "Ejecutando instancias: %s" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "Después de terminar las instancias: %s" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "Destino %s asignado" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "Recibido %s" + +#: cinder/virt/connection.py:85 +msgid 
"Failed to open connection to the hypervisor" +msgstr "Fallo al abrir conexión con el hypervisor" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" +"Debes especificar xenapi_connection_url, xenapi_connection_username " +"(opcional), y xenapi_connection_password para usar connection_type=xenapi" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "Obtenida excepción %s" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "instancia %s: reiniciada" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "instancia %s: rescatada" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "instancia %s: arrancada" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: 
cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "instancia %s: Creando imagen" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "instancia %s: comenzando método toXML" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "instancia %s: finalizado método toXML" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "Fallo al montar el sistema de ficheros: %s" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "No se puede unir la imagen con el loopback: %s" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "Conectando a libvirt: %s" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "Conexión a libvirt rota" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy, python-format +msgid "Instance soft rebooted successfully." +msgstr "instancia %s: reiniciada" + +#: cinder/virt/libvirt/connection.py:696 +#, fuzzy +msgid "Failed to soft reboot instance." +msgstr "Fallo a reinicia la instancia" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +#, fuzzy +msgid "Guest does not have a console available" +msgstr "El usuario no tiene privilegios de administrador" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "Lanzando NotImplemented" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake no tiene una implementación para %s" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "Llamando %(localname)s %(impl)s" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "Llamando al getter %s" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake no tiene una implementación para %s o ha sido llamada con un " +"número incorrecto de argumentos" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s."
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "Encontrada una red no única para el puente %s" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "No se ha encontrado red para el puente %s" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "Imposible desasociar volumen %s" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "VBD no encontrado en la instancia %s" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, fuzzy, python-format +msgid "VBD %s already detached" +msgstr "el grupo %s ya existe" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Imposible desconectar VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Imposible destruir VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, fuzzy, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "Creado el VBD %(vbd_ref)s para VM %(vm_ref)s, VDI %(vdi_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "Creado el VBD %(vbd_ref)s para VM %(vm_ref)s, VDI %(vdi_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" +"VDI creado %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) " +"sobre %(sr_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, fuzzy, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "Creando snapshot de la VM %(vm_ref)s con etiqueta '%(label)s'..." + +#: cinder/virt/xenapi/vm_utils.py:392 +#, fuzzy, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "Instantánea creada %(template_vm_ref)s de la VM %(vm_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "Pidiendo xapi a subir %(vdi_uuids)s como ID %(image_id)s" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, fuzzy, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "Tamaño para imagen %(image)s:%(virtual_size)d" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "Copiando VDI %s a /boot/guest on dom0" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "Kernel/Ramdisk VDI %s destruído" + +#: cinder/virt/xenapi/vm_utils.py:895 +#, fuzzy +msgid "Failed to fetch glance image" +msgstr "Fallo a reinicia la instancia" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Buscando vid %s para el kernel PV" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, 
python-format +msgid "VDI %s is still available" +msgstr "VDI %s está todavía disponible" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver vm state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of %(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "Re-escaneando SR %s" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "VHD %(vdi_uuid)s tiene origen en %(parent_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "Ejecutando pygrub contra %s" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "Kernel Xen Encontrado %s" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." +msgstr "Kernel Xen no encontrado. Reiniciando HVM" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +#, fuzzy +msgid "Starting instance" +msgstr "Creando una instancia raw" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +#, fuzzy +msgid "Failed to spawn, rolling back" +msgstr "Fallo al suspender la instancia" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +#, fuzzy, python-format +msgid "Finished snapshot and upload for VM" +msgstr "Finalizado el snapshot y la subida de la VM %s" + +#: cinder/virt/xenapi/vmops.py:677 +#, fuzzy, python-format +msgid "Starting snapshot for VM" +msgstr "Comenzando snapshot para la VM %s" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "Imposible adjuntar volumen a la instancia %s" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, fuzzy, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "La instacia %(instance_id)s no esta suspendida" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +#, fuzzy +msgid "Injecting network info to xenstore" +msgstr "configurando la red del host" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, fuzzy, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "Creando VIF para VM %(vm_ref)s, red %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1495 +#, fuzzy, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "Creando VIF para VM %(vm_ref)s, red %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "Imposible crear el repositorio de almacenamiento" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "Introduciendo %s..." 
+
+#: cinder/virt/xenapi/volume_utils.py:186
+#, python-format
+msgid "Unable to find SR from VBD %s"
+msgstr "Imposible encontrar SR en VBD %s"
+
+#: cinder/virt/xenapi/volume_utils.py:204
+#, python-format
+msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:210
+#, python-format
+msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:234
+#, python-format
+msgid "Unable to introduce VDI on SR %s"
+msgstr "Imposible insertar VDI en SR %s"
+
+#: cinder/virt/xenapi/volume_utils.py:242
+#, python-format
+msgid "Unable to get record of VDI %s on"
+msgstr "Imposible obtener el registro del VDI %s en"
+
+#: cinder/virt/xenapi/volume_utils.py:264
+#, python-format
+msgid "Unable to introduce VDI for SR %s"
+msgstr "Imposible insertar VDI para SR %s"
+
+#: cinder/virt/xenapi/volume_utils.py:274
+#, python-format
+msgid "Error finding vdis in SR %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:281
+#, python-format
+msgid "Unable to find vbd for vdi %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:315
+#, python-format
+msgid "Unable to obtain target information %(data)s, %(mountpoint)s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:341
+#, python-format
+msgid "Mountpoint cannot be translated: %s"
+msgstr "El punto de montaje no puede ser traducido: %s"
+
+#: cinder/virt/xenapi/volumeops.py:64
+msgid "Could not find VDI ref"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:69
+#, python-format
+msgid "Creating SR %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:73
+msgid "Could not create SR"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:76
+msgid "Could not retrieve SR record"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:81
+#, python-format
+msgid "Introducing SR %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:85
+msgid "SR found in xapi database. No need to introduce"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:90
+msgid "Could not introduce SR"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:94
+#, python-format
+msgid "Checking for SR %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:106
+#, python-format
+msgid "SR %s not found in the xapi database"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:112
+msgid "Could not forget SR"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:121
+#, python-format
+msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:178
+#, python-format
+msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
+msgstr ""
+"No es posible crear el VDI en SR %(sr_ref)s para la instancia "
+"%(instance_name)s"
+
+#: cinder/virt/xenapi/volumeops.py:189
+#, python-format
+msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
+msgstr "No es posible usar SR %(sr_ref)s para la instancia %(instance_name)s"
+
+#: cinder/virt/xenapi/volumeops.py:197
+#, python-format
+msgid "Unable to attach volume to instance %s"
+msgstr "Imposible adjuntar volumen a la instancia %s"
+
+#: cinder/virt/xenapi/volumeops.py:200
+#, python-format
+msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
+msgstr ""
+"El punto de montaje %(mountpoint)s está unido a la instancia "
+"%(instance_name)s"
+
+#: cinder/virt/xenapi/volumeops.py:210
+#, python-format
+msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
+msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s"
+
+#: cinder/virt/xenapi/volumeops.py:219
+#, python-format
+msgid "Unable to locate volume %s"
+msgstr "Imposible encontrar volumen %s"
+
+#: cinder/virt/xenapi/volumeops.py:227
+#, python-format
+msgid "Unable to detach volume %s"
+msgstr "Imposible desasociar volumen %s"
+
+#: cinder/virt/xenapi/volumeops.py:232
+#, python-format
+msgid "Unable to destroy vbd %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:239
+#, python-format
+msgid "Error purging SR %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:241
+#, python-format
+msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
+msgstr ""
+"El punto de montaje %(mountpoint)s se desligó de la instancia "
+"%(instance_name)s"
+
+#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103
+#, python-format
+msgid "Error in handshake: %s"
+msgstr ""
+
+#: cinder/vnc/xvp_proxy.py:119
+#, python-format
+msgid "Invalid request: %s"
+msgstr ""
+
+#: cinder/vnc/xvp_proxy.py:139
+#, python-format
+msgid "Request: %s"
+msgstr ""
+
+#: cinder/vnc/xvp_proxy.py:142
+#, python-format
+msgid "Request made with missing token: %s"
+msgstr ""
+
+#: cinder/vnc/xvp_proxy.py:153
+#, python-format
+msgid "Request made with invalid token: %s"
+msgstr ""
+
+#: cinder/vnc/xvp_proxy.py:160
+#, python-format
+msgid "Unexpected error: %s"
+msgstr ""
+
+#: cinder/vnc/xvp_proxy.py:180
+#, python-format
+msgid "Starting cinder-xvpvncproxy node (version %s)"
+msgstr ""
+
+#: cinder/volume/api.py:74 cinder/volume/api.py:220
+msgid "status must be available"
+msgstr ""
+
+#: cinder/volume/api.py:85
+#, python-format
+msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume"
+msgstr ""
+
+#: cinder/volume/api.py:137
+#, fuzzy
+msgid "Volume status must be available or error"
+msgstr "El estado del volumen debe estar disponible"
+
+#: cinder/volume/api.py:142
+#, python-format
+msgid "Volume still has %d dependent snapshots"
+msgstr ""
+
+#: cinder/volume/api.py:223
+msgid "already attached"
+msgstr ""
+
+#: cinder/volume/api.py:230
+msgid "already detached"
+msgstr ""
+
+#: cinder/volume/api.py:292
+msgid "must be available"
+msgstr ""
+
+#: cinder/volume/api.py:325
+#, fuzzy
+msgid "Volume Snapshot status must be available or error"
+msgstr "El estado del volumen debe estar disponible"
+
+#: cinder/volume/driver.py:96
+#, python-format
+msgid "Recovering from a failed execute. Try number %s"
+msgstr "Recuperándose de una ejecución fallida. Intento número %s"
+
+#: cinder/volume/driver.py:106
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr "el grupo de volúmenes %s no existe"
+
+#: cinder/volume/driver.py:270
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d"
+msgstr ""
+
+#: cinder/volume/driver.py:318
+#, python-format
+msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d"
+msgstr ""
+
+#: cinder/volume/driver.py:327
+#, python-format
+msgid ""
+"Skipping remove_export. No iscsi_target is presently exported for volume:"
+" %d"
+msgstr ""
+
+#: cinder/volume/driver.py:337
+msgid "ISCSI provider_location not stored, using discovery"
+msgstr ""
+
+#: cinder/volume/driver.py:384
+#, python-format
+msgid "Could not find iSCSI export for volume %s"
+msgstr ""
+
+#: cinder/volume/driver.py:388
+#, python-format
+msgid "ISCSI Discovery: Found %s"
+msgstr ""
+
+#: cinder/volume/driver.py:466
+#, python-format
+msgid "Cannot confirm exported volume id:%(volume_id)s."
+msgstr ""
+
+#: cinder/volume/driver.py:493
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr "Falso ISCSI: %s"
+
+#: cinder/volume/driver.py:505
+#, python-format
+msgid "rbd has no pool %s"
+msgstr ""
+
+#: cinder/volume/driver.py:579
+#, python-format
+msgid "Sheepdog is not working: %s"
+msgstr ""
+
+#: cinder/volume/driver.py:581
+msgid "Sheepdog is not working"
+msgstr ""
+
+#: cinder/volume/driver.py:680 cinder/volume/driver.py:685
+#, python-format
+msgid "LoggingVolumeDriver: %s"
+msgstr ""
+
+#: cinder/volume/manager.py:96
+#, python-format
+msgid "Re-exporting %s volumes"
+msgstr "Exportando de nuevo los volúmenes %s"
+
+#: cinder/volume/manager.py:101
+#, python-format
+msgid "volume %s: skipping export"
+msgstr "volumen %s: omitiendo exportación"
+
+#: cinder/volume/manager.py:107
+#, python-format
+msgid "volume %s: creating"
+msgstr "volumen %s: creando"
+
+#: cinder/volume/manager.py:119
+#, python-format
+msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG"
+msgstr "volumen %(vol_name)s: creando lv de tamaño %(vol_size)sG"
+
+#: cinder/volume/manager.py:131
+#, python-format
+msgid "volume %s: creating export"
+msgstr "volumen %s: creando exportación"
+
+#: cinder/volume/manager.py:144
+#, python-format
+msgid "volume %s: created successfully"
+msgstr "volumen %s: creado satisfactoriamente"
+
+#: cinder/volume/manager.py:153
+msgid "Volume is still attached"
+msgstr "El volumen todavía está asociado"
+
+#: cinder/volume/manager.py:155
+msgid "Volume is not local to this node"
+msgstr "El volumen no es local a este nodo"
+
+#: cinder/volume/manager.py:159
+#, python-format
+msgid "volume %s: removing export"
+msgstr "volumen %s: eliminando exportación"
+
+#: cinder/volume/manager.py:161
+#, python-format
+msgid "volume %s: deleting"
+msgstr "volumen %s: eliminando"
+
+#: cinder/volume/manager.py:164
+#, python-format
+msgid "volume %s: volume is busy"
+msgstr ""
+
+#: cinder/volume/manager.py:176
+#, python-format
+msgid "volume %s: deleted successfully"
+msgstr "volumen %s: eliminado satisfactoriamente"
+
+#:
cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instancia %s: creando snapshot" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconectado a la cola" + +#: cinder/volume/netapp.py:159 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "No encontrado: %s" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "Imposible encontrar SR en VBD %s" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Imposible encontrar volumen %s" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/netapp.py:614 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/netapp.py:620 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/netapp.py:625 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" 
+msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "respuesta %s" + +#: cinder/volume/nexenta/volume.py:96 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "el grupo de volumenes %s no existe" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." +msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "intentando finalizar una instancia que ya había sido finalizada: %s" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "" + +#~ msgid "message %s" +#~ msgstr "mensaje %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "(%(nm)s) publica (key: %(routing_key)s) %(message)s" + +#~ msgid "Publishing to route %s" +#~ msgstr "Publicando la ruta %s" + +#~ msgid "Declaring queue %s" +#~ msgstr "Declarando cola %s" + +#~ msgid "Declaring exchange %s" +#~ msgstr "Declarando intercambio %s" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "Enlazando %(queue)s a %(exchange)s con la llave %(routing_key)s" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "Obtendiendo desde %(queue)s: %(message)s" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "virsh dijo: %r" + +#~ msgid "cool, it's a device" +#~ msgstr "genial, es un dispositivo" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "Creada VM %s..." + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "VM creada %(instance_name)s como %(vm_ref)s." + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "Creando VBD para VDI %s ... " + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "Creando VBF para VDI %s terminado" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "VBD.Primera desconexión satisfactoria." + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "VBD. Desconexión rechazada: reintentándolo..." + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "VBD.Finalmente logro desconectar." + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "Instancia %s: no se pudo iniciar" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "Iniciando VM %s..." + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "Creado el VIF %(vif_ref)s para VM %(vm_ref)s, red %(network_ref)s." + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "Creando VBD para VM %(vm_ref)s, VDI %(vdi_ref)s ... " + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/fr/LC_MESSAGES/nova.po b/cinder/locale/fr/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..036363370ca --- /dev/null +++ b/cinder/locale/fr/LC_MESSAGES/nova.po @@ -0,0 +1,8251 @@ +# French translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-04-06 14:54+0000\n" +"Last-Translator: EmmanuelLeNormand \n" +"Language-Team: French \n" +"Plural-Forms: nplurals=2; plural=(n > 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "Nom du fichier contenant la racine de l'autorité de certification" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Nom de fichier de la clé privée" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "Nom du fichier de la liste de révocation du Certificat Racine" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "Emplacement de sauvegarde des clefs" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "Emplacement de sauvegarde des racines d'autorité de certification" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "Doit-on utiliser une autorité de certification pour chaque projet ?" 
+
+#: cinder/crypto.py:67
+#, python-format
+msgid "Subject for certificate for users, %s for project, user, timestamp"
+msgstr ""
+"Sujet pour les certificats utilisateurs, %s pour le projet, utilisateur, "
+"timestamp"
+
+#: cinder/crypto.py:72
+#, python-format
+msgid "Subject for certificate for projects, %s for project, timestamp"
+msgstr "Sujet de certificat pour projets, %s pour le projet, timestamp"
+
+#: cinder/crypto.py:292
+#, python-format
+msgid "Flags path: %s"
+msgstr "Chemin des propriétés: %s"
+
+#: cinder/exception.py:56
+msgid "Unexpected error while running command."
+msgstr "Erreur imprévue lors de l'exécution de la commande."
+
+#: cinder/exception.py:59
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+"%(description)s\n"
+"Commande : %(cmd)s\n"
+"Valeur retournée : %(exit_code)s\n"
+"Sortie standard : %(stdout)r\n"
+"Sortie d'erreur : %(stderr)r"
+
+#: cinder/exception.py:94
+msgid "DB exception wrapped."
+msgstr ""
+
+#: cinder/exception.py:155
+msgid "An unknown exception occurred."
+msgstr "Une exception inconnue s'est produite."
+
+#: cinder/exception.py:178
+msgid "Failed to decrypt text"
+msgstr "Échec du décryptage du texte"
+
+#: cinder/exception.py:182
+msgid "Failed to paginate through images from image service"
+msgstr "Échec de la pagination à travers les images depuis le service d'image"
+
+#: cinder/exception.py:186
+msgid "Virtual Interface creation failed"
+msgstr "La création de l'Interface Virtuelle a échoué"
+
+#: cinder/exception.py:190
+msgid "5 attempts to create virtual interfacewith unique mac address failed"
+msgstr ""
+"Les 5 tentatives de création de l'interface virtuelle avec une adresse "
+"MAC unique ont échoué"
+
+#: cinder/exception.py:195
+msgid "Connection to glance failed"
+msgstr "La connexion à Glance a échoué"
+
+#: cinder/exception.py:199
+msgid "Connection to melange failed"
+msgstr "La connexion à Melange a échoué"
+
+#: cinder/exception.py:203
+msgid "Not authorized."
+msgstr "Non autorisé."
+
+#: cinder/exception.py:208
+msgid "User does not have admin privileges"
+msgstr "L’utilisateur n'a pas les privilèges administrateur"
+
+#: cinder/exception.py:212
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr "Le réglage des droits n'autorise pas %(action)s à être effectué(e)(s)"
+
+#: cinder/exception.py:216
+#, fuzzy, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr "Pas de méthode pour le message : %s"
+
+#: cinder/exception.py:220
+msgid "Unacceptable parameters."
+msgstr "Paramètres inacceptables."
+
+#: cinder/exception.py:225
+msgid "Invalid snapshot"
+msgstr "Snapshot invalide"
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s is not attached to anything"
+msgstr "Le volume %(volume_id)s n'est lié à rien"
+
+#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113
+msgid "Keypair data is invalid"
+msgstr "La paire de clés de donnée n'est pas valide"
+
+#: cinder/exception.py:237
+msgid "Failed to load data into json format"
+msgstr "Échec du chargement des données au format JSON"
+
+#: cinder/exception.py:241
+msgid "The request is invalid."
+msgstr "La requête est invalide."
+
+#: cinder/exception.py:245
+#, python-format
+msgid "Invalid signature %(signature)s for user %(user)s."
+msgstr "La signature %(signature)s est invalide pour l'utilisateur %(user)s."
+ +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "Entrée invalide reçue" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "L'instance de type %(instance_type)s est invalide." + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "Type de volume invalide" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "Volume invalide" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "La plage de port %(from_port)s:%(to_port)s. %(msg)s est invalide" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "Le protocole IP %(protocol)s est invalide" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Le type de contenu %(content_type)s est invalide" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "Le cidr %(cidr)s est invalide" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "Réutilisation invalide d'une connexion RPC" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" +"Impossible d'effectuer l'action '%(action)s' sur l'ensemble " +"%(aggregate_id)s. Raison: %(reason)s." + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" +"L'instance %(instance_uuid)s dans %(attr)s %(state)s. Impossible de " +"%(method)s pendant que l'instance est dans cet état." + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "L'instance %(instance_id)s ne fonctionne pas." + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "L'instance %(instance_id)s n'est pas suspendue" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "L'instance %(instance_id)s n'est pas en mode secours" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "Échec de la suspension de l'instance" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "Échec de la reprise du serveur" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "Échec du redémarrage de l'instance" + +#: cinder/exception.py:334 +#, fuzzy +msgid "Failed to terminate instance" +msgstr "Échec du redémarrage de l'instance" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "Le service est indisponible actuellement." + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "Le service de volume est indisponible actuellement." + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "Le service de calcul est indisponible actuellement." + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." 
+msgstr ""
+"Impossible de migrer l'instance (%(instance_id)s) vers l'hôte actuel "
+"(%(host)s)."
+
+#: cinder/exception.py:355
+msgid "Destination compute host is unavailable at this time."
+msgstr "L'hôte de calcul de destination est indisponible actuellement."
+
+#: cinder/exception.py:359
+msgid "Original compute host is unavailable at this time."
+msgstr "L'hôte de calcul d'origine est indisponible actuellement."
+
+#: cinder/exception.py:363
+msgid "The supplied hypervisor type of is invalid."
+msgstr "Le type de l'hyperviseur fourni n'est pas valide."
+
+#: cinder/exception.py:367
+msgid "The instance requires a newer hypervisor version than has been provided."
+msgstr ""
+"L'instance nécessite une version plus récente de l'hyperviseur que celle "
+"fournie."
+
+#: cinder/exception.py:372
+#, python-format
+msgid ""
+"The supplied disk path (%(path)s) already exists, it is expected not to "
+"exist."
+msgstr ""
+
+#: cinder/exception.py:377
+#, python-format
+msgid "The supplied device path (%(path)s) is invalid."
+msgstr ""
+
+#: cinder/exception.py:381
+#, python-format
+msgid "The supplied device (%(device)s) is busy."
+msgstr ""
+
+#: cinder/exception.py:385
+msgid "Unacceptable CPU info"
+msgstr ""
+
+#: cinder/exception.py:389
+#, python-format
+msgid "%(address)s is not a valid IP v4/6 address."
+msgstr ""
+
+#: cinder/exception.py:393
+#, python-format
+msgid ""
+"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
+"tag is %(tag)s, but the one associated with the port group is %(pgroup)s."
+msgstr ""
+
+#: cinder/exception.py:399
+#, python-format
+msgid ""
+"vSwitch which contains the port group %(bridge)s is not associated with "
+"the desired physical adapter. Expected vSwitch is %(expected)s, but the "
+"one associated is %(actual)s."
+msgstr ""
+
+#: cinder/exception.py:406
+#, python-format
+msgid "Disk format %(disk_format)s is not acceptable"
+msgstr ""
+
+#: cinder/exception.py:410
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:414
+#, python-format
+msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:418
+#, python-format
+msgid "Ec2 id %(ec2_id)s is unacceptable."
+msgstr ""
+
+#: cinder/exception.py:422
+msgid "Resource could not be found."
+msgstr ""
+
+#: cinder/exception.py:427
+#, python-format
+msgid "Required flag %(flag)s not set."
+msgstr ""
+
+#: cinder/exception.py:431
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:435
+#, python-format
+msgid "Unable to locate account %(account_name)s on Solidfire device"
+msgstr ""
+
+#: cinder/exception.py:440
+#, python-format
+msgid "Volume not found for instance %(instance_id)s."
+msgstr ""
+
+#: cinder/exception.py:444
+#, python-format
+msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+msgstr ""
+
+#: cinder/exception.py:449
+msgid "Zero volume types found."
+msgstr ""
+
+#: cinder/exception.py:453
+#, python-format
+msgid "Volume type %(volume_type_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:457
+#, python-format
+msgid "Volume type with name %(volume_type_name)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:462
+#, python-format
+msgid ""
+"Volume Type %(volume_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr ""
+
+#: cinder/exception.py:467
+#, python-format
+msgid "Snapshot %(snapshot_id)s could not be found."
+msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." 
+msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." 
+msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." 
+msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "L'utilisateur %(uid)s est déjà membre du groupe %(group_dn)s" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." 
+msgstr ""
+
+#: cinder/exception.py:975
+#, python-format
+msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
+msgstr ""
+
+#: cinder/exception.py:980
+#, python-format
+msgid "Host %(host)s already member of another aggregate."
+msgstr ""
+
+#: cinder/exception.py:984
+#, python-format
+msgid "Aggregate %(aggregate_id)s already has host %(host)s."
+msgstr ""
+
+#: cinder/exception.py:988
+#, python-format
+msgid "Detected more than one volume with name %(vol_name)s"
+msgstr ""
+
+#: cinder/exception.py:992
+#, python-format
+msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s"
+msgstr ""
+
+#: cinder/exception.py:997
+msgid "Unable to create instance type"
+msgstr ""
+
+#: cinder/exception.py:1001
+msgid "Bad response from SolidFire API"
+msgstr ""
+
+#: cinder/exception.py:1005
+#, python-format
+msgid "Error in SolidFire API response: status=%(status)s"
+msgstr ""
+
+#: cinder/exception.py:1009
+#, python-format
+msgid "Error in SolidFire API response: data=%(data)s"
+msgstr ""
+
+#: cinder/exception.py:1013
+#, python-format
+msgid "Detected existing vlan with id %(vlan)d"
+msgstr ""
+
+#: cinder/exception.py:1017
+#, python-format
+msgid "Instance %(instance_id)s could not be found."
+msgstr ""
+
+#: cinder/exception.py:1021
+#, python-format
+msgid "Invalid id: %(val)s (expecting \"i-...\")."
+msgstr ""
+
+#: cinder/exception.py:1025
+#, python-format
+msgid "Could not fetch image %(image)s"
+msgstr "Impossible de récupérer l'image %(image)s"
+
+#: cinder/log.py:315
+#, python-format
+msgid "syslog facility must be one of: %s"
+msgstr ""
+
+#: cinder/manager.py:146
+#, python-format
+msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run"
+msgstr ""
+
+#: cinder/manager.py:152
+#, python-format
+msgid "Running periodic task %(full_task_name)s"
+msgstr ""
+
+#: cinder/manager.py:159
+#, python-format
+msgid "Error during %(full_task_name)s: %(e)s"
+msgstr ""
+
+#: cinder/manager.py:203
+msgid "Notifying Schedulers of capabilities ..."
+msgstr ""
+
+#: cinder/policy.py:30
+msgid "JSON file representing policy"
+msgstr ""
+
+#: cinder/policy.py:33
+msgid "Rule checked when requested rule is not found"
+msgstr ""
+
+#: cinder/service.py:137
+msgid "SIGTERM received"
+msgstr ""
+
+#: cinder/service.py:177
+#, python-format
+msgid "Starting %(topic)s node (version %(vcs_string)s)"
+msgstr "Démarrage du noeud %(topic)s (version %(vcs_string)s)"
+
+#: cinder/service.py:195
+#, python-format
+msgid "Creating Consumer connection for Service %s"
+msgstr ""
+
+#: cinder/service.py:282
+msgid "Service killed that has no database entry"
+msgstr "Service détruit sans entrée dans la base de données"
+
+#: cinder/service.py:319
+msgid "The service database object disappeared, Recreating it."
+msgstr "L'objet service de la base de données a disparu, recréation en cours."
+
+#: cinder/service.py:334
+msgid "Recovered model server connection!"
+msgstr "Connexion au serveur de modèles rétablie !"
+
+#: cinder/service.py:340
+msgid "model server went away"
+msgstr "le serveur de modèles a disparu"
+
+#: cinder/service.py:433
+msgid "Full set of FLAGS:"
+msgstr "Ensemble de propriétés complet :"
+
+#: cinder/service.py:440
+#, python-format
+msgid "%(flag)s : FLAG SET "
+msgstr ""
+
+#: cinder/utils.py:79
+#, python-format
+msgid "Inner Exception: %s"
+msgstr "Exception interne : %s"
+
+#: cinder/utils.py:165
+#, python-format
+msgid "Fetching %s"
+msgstr "Récupération de %s"
+
+#: cinder/utils.py:210
+#, python-format
+msgid "Got unknown keyword args to utils.execute: %r"
+msgstr ""
+
+#: cinder/utils.py:220
+#, python-format
+msgid "Running cmd (subprocess): %s"
+msgstr "Exécution de la commande (sous-processus) : %s"
+
+#: cinder/utils.py:236 cinder/utils.py:315
+#, python-format
+msgid "Result was %s"
+msgstr "Le résultat était %s"
+
+#: cinder/utils.py:249
+#, python-format
+msgid "%r failed. Retrying."
+msgstr ""
+
+#: cinder/utils.py:291
+#, python-format
+msgid "Running cmd (SSH): %s"
+msgstr "Exécution de la cmd (SSH) : %s"
+
+#: cinder/utils.py:293
+msgid "Environment not supported over SSH"
+msgstr ""
+
+#: cinder/utils.py:297
+msgid "process_input not supported over SSH"
+msgstr ""
+
+#: cinder/utils.py:352
+#, python-format
+msgid "debug in callback: %s"
+msgstr "Debug dans le rappel : %s"
+
+#: cinder/utils.py:534
+#, python-format
+msgid "Link Local address is not found.:%s"
+msgstr "L'adresse de lien local n'a pas été trouvée : %s"
+
+#: cinder/utils.py:537
+#, python-format
+msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
+msgstr "Impossible d'obtenir l'IP de lien local de %(interface)s : %(ex)s"
+
+#: cinder/utils.py:648
+#, python-format
+msgid "Invalid backend: %s"
+msgstr "Backend invalide : %s"
+
+#: cinder/utils.py:659
+#, python-format
+msgid "backend %s"
+msgstr "backend %s"
+
+#: cinder/utils.py:709
+msgid "in looping call"
+msgstr ""
+
+#: cinder/utils.py:927
+#, python-format
+msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..."
+msgstr ""
+
+#: cinder/utils.py:931
+#, python-format
+msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
+msgstr ""
+
+#: cinder/utils.py:935
+#, python-format
+msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr ""
+
+#: cinder/utils.py:942
+#, python-format
+msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr ""
+
+#: cinder/utils.py:1001
+#, python-format
+msgid "Found sentinel %(filename)s for pid %(pid)s"
+msgstr ""
+
+#: cinder/utils.py:1008
+#, python-format
+msgid "Cleaned sentinel %(filename)s for pid %(pid)s"
+msgstr ""
+
+#: cinder/utils.py:1023
+#, python-format
+msgid "Found lockfile %(file)s with link count %(count)d"
+msgstr ""
+
+#: cinder/utils.py:1028
+#, python-format
+msgid "Cleaned lockfile %(file)s with link count %(count)d"
+msgstr ""
+
+#: cinder/utils.py:1138
+#, python-format
+msgid "Expected object of type: %s"
+msgstr ""
+
+#: cinder/utils.py:1169
+#, python-format
+msgid "Invalid server_string: %s"
+msgstr ""
+
+#: cinder/utils.py:1298
+#, python-format
+msgid "timefunc: '%(name)s' took %(total_time).2f secs"
+msgstr ""
+
+#: cinder/utils.py:1330
+msgid "Original exception being dropped"
+msgstr ""
+
+#: cinder/utils.py:1461
+#, python-format
+msgid "Class %(fullname)s is deprecated: %(msg)s"
+msgstr ""
+
+#: cinder/utils.py:1463
+#, python-format
+msgid "Class %(fullname)s is deprecated"
+msgstr ""
+
+#: cinder/utils.py:1495
+#, python-format
+msgid "Function %(name)s in %(location)s is deprecated: %(msg)s"
+msgstr ""
+
+#: cinder/utils.py:1497
+#, python-format
+msgid "Function %(name)s in %(location)s is deprecated"
+msgstr ""
+
+#: cinder/utils.py:1681
+#, python-format
+msgid "Could not remove tmpdir: %s"
+msgstr ""
+
+#: cinder/wsgi.py:97
+#, python-format
+msgid "Started %(name)s on %(host)s:%(port)s"
+msgstr ""
+
+#: cinder/wsgi.py:108
+msgid "Stopping WSGI server."
+msgstr ""
+
+#: cinder/wsgi.py:111
+msgid "Stopping raw TCP server."
+msgstr ""
+
+#: cinder/wsgi.py:117
+#, python-format
+msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s"
+msgstr ""
+
+#: cinder/wsgi.py:133
+msgid "WSGI server has stopped."
+msgstr ""
+
+#: cinder/wsgi.py:211
+msgid "You must implement __call__"
+msgstr "Vous devez implémenter __call__"
+
+#: cinder/api/direct.py:218
+msgid "not available"
+msgstr "non disponible"
+
+#: cinder/api/direct.py:299
+#, python-format
+msgid "Returned non-serializeable type: %s"
+msgstr ""
+
+#: cinder/api/sizelimit.py:51
+msgid "Request is too large."
+msgstr ""
+
+#: cinder/api/validator.py:142
+#, python-format
+msgid "%(key)s with value %(value)s failed validator %(validator)s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:73
+#, python-format
+msgid "%(code)s: %(message)s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:95
+#, python-format
+msgid "FaultWrapper: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:170
+msgid "Too many failed authentications."
+msgstr "Trop d'échecs d'authentification."
+
+#: cinder/api/ec2/__init__.py:180
+#, python-format
+msgid ""
+"Access key %(access_key)s has had %(failures)d failed authentications and"
+" will be locked out for %(lock_mins)d minutes."
+msgstr ""
+"La clef d'accès %(access_key)s a rencontré %(failures)d échecs "
+"d'authentification et sera par conséquent verrouillée pendant "
+"%(lock_mins)d minutes."
+
+#: cinder/api/ec2/__init__.py:267
+msgid "Signature not provided"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:271
+msgid "Access key not provided"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319
+msgid "Failure communicating with keystone"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:388
+#, python-format
+msgid "Authentication Failure: %s"
+msgstr "Échec d'authentification : %s"
+
+#: cinder/api/ec2/__init__.py:404
+#, python-format
+msgid "Authenticated Request For %(uname)s:%(pname)s)"
+msgstr "Requête authentifiée pour %(uname)s:%(pname)s)"
+
+#: cinder/api/ec2/__init__.py:435
+#, python-format
+msgid "action: %s"
+msgstr "action: %s"
+
+#: cinder/api/ec2/__init__.py:437
+#, python-format
+msgid "arg: %(key)s\t\tval: %(value)s"
+msgstr "arg: %(key)s\t\tval: %(value)s"
+
+#: cinder/api/ec2/__init__.py:512
+#, python-format
+msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
+msgstr ""
+"Requête non autorisée pour le contrôleur=%(controller)s et "
+"l'action=%(action)s"
+
+#: cinder/api/ec2/__init__.py:584
+#, python-format
+msgid "InstanceNotFound raised: %s"
+msgstr "\"Instance non trouvée\" remontée : %s"
+
+#: cinder/api/ec2/__init__.py:590
+#, python-format
+msgid "VolumeNotFound raised: %s"
+msgstr "\"Volume non trouvé\" remonté : %s"
+
+#: cinder/api/ec2/__init__.py:596
+#, python-format
+msgid "SnapshotNotFound raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:602
+#, python-format
+msgid "NotFound raised: %s"
+msgstr "\"Non trouvé\" remonté : %s"
+
+#: cinder/api/ec2/__init__.py:605
+#, python-format
+msgid "EC2APIError raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:613
+#, python-format
+msgid "KeyPairExists raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:617
+#, python-format
+msgid "InvalidParameterValue raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:621
+#, python-format
+msgid "InvalidPortRange raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:625
+#, python-format
+msgid "NotAuthorized raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:629
+#, python-format
+msgid "InvalidRequest raised: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:633
+#, python-format
+msgid "QuotaError raised: %s"
+msgstr "\"QuotaError\" remontée : %s"
+
+#: cinder/api/ec2/__init__.py:637
+#, python-format
+msgid "Invalid id: bogus (expecting \"i-...\"): %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:646
+#, python-format
+msgid "Unexpected error raised: %s"
+msgstr "\"Erreur inopinée\" remontée : %s"
+
+#: cinder/api/ec2/__init__.py:647
+#, python-format
+msgid "Environment: %s"
+msgstr ""
+
+#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248
+msgid "An unknown error has occurred. Please try your request again."
+msgstr "Une erreur inconnue a eu lieu. Merci d'essayer votre requête à nouveau."
+
+#: cinder/api/ec2/apirequest.py:64
+#, python-format
+msgid "Unsupported API request: controller = %(controller)s, action = %(action)s"
+msgstr ""
+"Requête API non supportée : contrôleur = %(controller)s, action = "
+"%(action)s"
+
+#: cinder/api/ec2/cloud.py:336
+#, python-format
+msgid "Create snapshot of volume %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:372
+#, python-format
+msgid ""
+"Value (%s) for KeyName is invalid. Content limited to Alphanumeric "
+"character, spaces, dashes, and underscore."
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:378
+#, python-format
+msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255."
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:382
+#, python-format
+msgid "Create key pair %s"
+msgstr "Création de la paire de clés %s"
+
+#: cinder/api/ec2/cloud.py:391
+#, python-format
+msgid "Import key %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:409
+#, python-format
+msgid "Delete key pair %s"
+msgstr "Suppression de la paire de clés %s"
+
+#: cinder/api/ec2/cloud.py:551
+msgid "Invalid CIDR"
+msgstr "CIDR invalide"
+
+#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693
+#: cinder/api/ec2/cloud.py:800
+msgid "Not enough parameters, need group_name or group_id"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:654
+#: cinder/api/openstack/compute/contrib/security_groups.py:517
+#, python-format
+msgid "Revoke security group ingress %s"
+msgstr "Révocation des règles d'entrée du groupe de sécurité %s"
+
+#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719
+#, python-format
+msgid "%s Not enough parameters to build a valid rule"
+msgstr "%s Pas assez de paramètres pour construire une règle valide"
+
+#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744
+msgid "No rule for the specified parameters."
+msgstr "Pas de règle pour les paramètres spécifiés."
+
+#: cinder/api/ec2/cloud.py:708
+#: cinder/api/openstack/compute/contrib/security_groups.py:354
+#, python-format
+msgid "Authorize security group ingress %s"
+msgstr "Autorisation des règles d'entrée du groupe de sécurité %s"
+
+#: cinder/api/ec2/cloud.py:725
+#, python-format
+msgid "%s - This rule already exists in group"
+msgstr "%s - Cette règle existe déjà dans le groupe"
+
+#: cinder/api/ec2/cloud.py:769
+#, python-format
+msgid ""
+"Value (%s) for parameter GroupName is invalid. Content limited to "
+"Alphanumeric characters, spaces, dashes, and underscores."
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:776
+#, python-format
+msgid ""
+"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of "
+"255."
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:780
+#: cinder/api/openstack/compute/contrib/security_groups.py:292
+#, python-format
+msgid "Create Security Group %s"
+msgstr "Création du groupe de sécurité %s"
+
+#: cinder/api/ec2/cloud.py:783
+#, python-format
+msgid "group %s already exists"
+msgstr "le groupe %s existe déjà"
+
+#: cinder/api/ec2/cloud.py:815
+#: cinder/api/openstack/compute/contrib/security_groups.py:245
+#, python-format
+msgid "Delete security group %s"
+msgstr "Suppression du groupe de sécurité %s"
+
+#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630
+#, python-format
+msgid "Get console output for instance %s"
+msgstr "Récupération de la sortie de la console de l'instance %s"
+
+#: cinder/api/ec2/cloud.py:894
+#, python-format
+msgid "Create volume from snapshot %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186
+#: cinder/api/openstack/volume/volumes.py:222
+#, python-format
+msgid "Create volume of %s GB"
+msgstr "Création d'un volume de %s Go"
+
+#: cinder/api/ec2/cloud.py:921
+msgid "Delete Failed"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:931
+#, python-format
+msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
+msgstr ""
+"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant "
+"que %(device)s"
+
+#: cinder/api/ec2/cloud.py:939
+msgid "Attach Failed."
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366
+#, python-format
+msgid "Detach volume %s"
+msgstr "Démontage du volume %s"
+
+#: cinder/api/ec2/cloud.py:959
+msgid "Detach Volume Failed."
+msgstr "Échec du démontage du volume."
+
+#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041
+#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533
+#, python-format
+msgid "attribute not supported: %s"
+msgstr "attribut non supporté : %s"
+
+#: cinder/api/ec2/cloud.py:1107
+#, python-format
+msgid "vol = %s\n"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1255
+msgid "Allocate address"
+msgstr "Allocation d'adresse"
+
+#: cinder/api/ec2/cloud.py:1267
+#, python-format
+msgid "Release address %s"
+msgstr "Désallocation de l'adresse %s"
+
+#: cinder/api/ec2/cloud.py:1272
+#, python-format
+msgid "Associate address %(public_ip)s to instance %(instance_id)s"
+msgstr "Association de l'adresse %(public_ip)s avec l'instance %(instance_id)s"
+
+#: cinder/api/ec2/cloud.py:1282
+#, python-format
+msgid "Disassociate address %s"
+msgstr "Désassociation de l'adresse %s"
+
+#: cinder/api/ec2/cloud.py:1308
+msgid "Image must be available"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1329
+msgid "Going to start terminating instances"
+msgstr "Début de la destruction des instances"
+
+#: cinder/api/ec2/cloud.py:1343
+#, python-format
+msgid "Reboot instance %r"
+msgstr "Redémarrage de l'instance %r"
+
+#: cinder/api/ec2/cloud.py:1354
+msgid "Going to stop instances"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1365
+msgid "Going to start instances"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1455
+#, python-format
+msgid "De-registering image %s"
+msgstr "Désenregistrement de l'image %s"
+
+#: cinder/api/ec2/cloud.py:1471
+msgid "imageLocation is required"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1490
+#, python-format
+msgid "Registered image %(image_location)s with id %(image_id)s"
+msgstr "Image %(image_location)s enregistrée avec l'id %(image_id)s"
+
+#: cinder/api/ec2/cloud.py:1536
+msgid "user or group not specified"
+msgstr "Utilisateur ou groupe non spécifié"
+
+#: cinder/api/ec2/cloud.py:1538
+msgid "only group \"all\" is supported"
+msgstr "Seul le groupe \"all\" est supporté"
+
+#: cinder/api/ec2/cloud.py:1540
+msgid "operation_type must be add or remove"
+msgstr ""
+"le type d'opération (operation_type) doit être ajout (add) ou suppression"
+" (remove)"
+
+#: cinder/api/ec2/cloud.py:1542
+#, python-format
+msgid "Updating image %s publicity"
+msgstr "Mise à jour de la publication de l'image %s"
+
+#: cinder/api/ec2/cloud.py:1555
+#, python-format
+msgid "Not allowed to modify attributes for image %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1603
+#, python-format
+msgid "Couldn't stop instance with in %d sec"
+msgstr ""
+
+#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr "Impossible de récupérer les métadonnées pour l'IP : %s"
+
+#: cinder/api/openstack/__init__.py:43
+#, python-format
+msgid "Caught error: %s"
+msgstr "Erreur interceptée : %s"
+
+#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:94
+msgid "Must specify an ExtensionManager class"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:105
+#, python-format
+msgid "Extended resource: %s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:130
+#, python-format
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:135
+#, python-format
+msgid "Extension %(ext_name)s extending resource: %(collection)s"
+msgstr ""
+
+#: cinder/api/openstack/auth.py:90
+#, python-format
+msgid "%(user_id)s could not be found with token '%(token)s'"
+msgstr ""
+
+#: cinder/api/openstack/auth.py:134
+#, python-format
+msgid "%(user_id)s must be an admin or a member of %(project_id)s"
+msgstr ""
+
+#: cinder/api/openstack/auth.py:152
+msgid "Authentication requests must be made against a version root (e.g. /v2)."
+msgstr ""
+
+#: cinder/api/openstack/auth.py:167
+#, python-format
+msgid "Could not find %s in request."
+msgstr ""
+
+#: cinder/api/openstack/auth.py:191
+#, python-format
+msgid "Successfully authenticated '%s'"
+msgstr ""
+
+#: cinder/api/openstack/auth.py:241
+msgid "User not found with provided API key."
+msgstr ""
+
+#: cinder/api/openstack/auth.py:258
+#, python-format
+msgid "Provided API key is valid, but not for user '%(username)s'"
+msgstr ""
+
+#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167
+msgid "limit param must be an integer"
+msgstr ""
+
+#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171
+msgid "limit param must be positive"
+msgstr ""
+
+#: cinder/api/openstack/common.py:161
+msgid "offset param must be an integer"
+msgstr ""
+
+#: cinder/api/openstack/common.py:175
+msgid "offset param must be positive"
+msgstr ""
+
+#: cinder/api/openstack/common.py:203
+#, python-format
+msgid "marker [%s] not found"
+msgstr ""
+
+#: cinder/api/openstack/common.py:243
+#, python-format
+msgid "href %s does not contain version"
+msgstr ""
+
+#: cinder/api/openstack/common.py:278
+msgid "Image metadata limit exceeded"
+msgstr ""
+
+#: cinder/api/openstack/common.py:295
+#, python-format
+msgid "Converting nw_info: %s"
+msgstr ""
+
+#: cinder/api/openstack/common.py:305
+#, python-format
+msgid "Converted networks: %s"
+msgstr ""
+
+#: cinder/api/openstack/common.py:338
+#, python-format
+msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
+msgstr ""
+
+#: cinder/api/openstack/common.py:341
+#, python-format
+msgid "Instance is in an invalid state for '%(action)s'"
+msgstr ""
+
+#: cinder/api/openstack/common.py:421
+msgid "Rejecting snapshot request, snapshots currently disabled"
+msgstr ""
+
+#: cinder/api/openstack/common.py:423
+msgid "Instance snapshots are not permitted at this time."
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: 
cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:201
+#: cinder/api/openstack/compute/contrib/floating_ips.py:230
+#: cinder/api/openstack/compute/contrib/security_groups.py:571
+#: cinder/api/openstack/compute/contrib/security_groups.py:604
+msgid "Missing parameter dict"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:204
+#: cinder/api/openstack/compute/contrib/floating_ips.py:233
+msgid "Address not specified"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:213
+msgid "No fixed ips associated to instance"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:216
+msgid "Associate floating ip failed"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:144
+#, python-format
+msgid "Invalid status: '%s'"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:148
+#, python-format
+msgid "Invalid mode: '%s'"
+msgstr "Mode invalide : '%s'"
+
+#: cinder/api/openstack/compute/contrib/hosts.py:152
+#, python-format
+msgid "Invalid update setting: '%s'"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:170
+#, python-format
+msgid "Putting host %(host)s in maintenance mode %(mode)s."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:181
+#, python-format
+msgid "Setting host %(host)s to %(state)s."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:230
+msgid "Describe-resource is admin only functionality"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:238
+msgid "Host not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:70
+msgid "Keypair name contains unsafe characters"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:95
+msgid "Keypair name must be between 1 and 255 characters long"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:100
+#, python-format
+msgid "Key pair '%s' already exists."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/multinic.py:52
+msgid "Missing 'networkId' argument for addFixedIp"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/multinic.py:68
+msgid "Missing 'address' argument for removeFixedIp"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/multinic.py:77
+#, python-format
+msgid "Unable to find address %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:62
+#, python-format
+msgid "Network does not have %s action"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:70
+#, python-format
+msgid "Disassociating network with id %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:74
+#: cinder/api/openstack/compute/contrib/networks.py:91
+#: cinder/api/openstack/compute/contrib/networks.py:101
+msgid "Network not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:87
+#, python-format
+msgid "Showing network with id %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:97
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41
+msgid "Malformed scheduler_hints attribute"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:222
+msgid "Security group id should be integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:243
+msgid "Security group is still in use"
+msgstr "Le groupe de sécurité est toujours utilisé"
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:295
+#, python-format
+msgid "Security group %s already exists"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:315
+#, python-format
+msgid "Security group %s is not a string or unicode"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:318
+#, python-format
+msgid "Security group %s cannot be empty."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:321
+#, python-format
+msgid "Security group %s should not be greater than 255 characters."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:348
+msgid "Parent group id is not integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:351
+#, python-format
+msgid "Security group (%s) not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:369
+msgid "Not enough parameters to build a valid rule."
+msgstr "Pas assez de paramètres pour construire une règle valide."
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:376
+#, python-format
+msgid "This rule already exists in group %s"
+msgstr "Cette règle existe déjà dans le groupe %s"
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:414
+msgid "Parent or group id is not integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:507
+msgid "Rule id is not integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:510
+#, python-format
+msgid "Rule (%s) not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:574
+#: cinder/api/openstack/compute/contrib/security_groups.py:607
+msgid "Security group not specified"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:578
+#: cinder/api/openstack/compute/contrib/security_groups.py:611
+msgid "Security group name cannot be empty"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/server_start_stop.py:45
+#, python-format
+msgid "start instance %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/server_start_stop.py:54
+#, python-format
+msgid "stop instance %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:73
+#: cinder/api/openstack/volume/volumes.py:106
+#, python-format
+msgid "vol=%s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:146
+#: cinder/api/openstack/volume/volumes.py:184
+#, python-format
+msgid "Delete volume with id: %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:329
+#, python-format
+msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:481
+#: cinder/api/openstack/volume/snapshots.py:110
+#, python-format
+msgid "Delete snapshot with id: %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:524
+#: cinder/api/openstack/volume/snapshots.py:150
+#, python-format
+msgid "Create snapshot from volume %s"
+msgstr ""
+
+#: cinder/auth/fakeldap.py:33
+msgid "Attempted to instantiate singleton"
+msgstr "Tentative d'instanciation d'un singleton"
+
+#: cinder/auth/ldapdriver.py:650
+#, python-format
+msgid ""
+"Attempted to remove the last member of a group. Deleting the group at %s "
+"instead."
+msgstr ""
+"Tentative de suppression du dernier membre d'un groupe. Le groupe à %s "
+"sera supprimé à la place."
+
+#: cinder/auth/manager.py:298
+#, python-format
+msgid "Looking up user: %r"
+msgstr "Recherche de l'utilisateur : %r"
+
+#: cinder/auth/manager.py:302
+#, python-format
+msgid "Failed authorization for access key %s"
+msgstr "Autorisation refusée pour la clef d'accès %s"
+
+#: cinder/auth/manager.py:308
+#, python-format
+msgid "Using project name = user name (%s)"
+msgstr "Nom de projet utilisé = nom d'utilisateur (%s)"
+
+#: cinder/auth/manager.py:315
+#, python-format
+msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)"
+msgstr ""
+"Autorisation refusée : pas de projet nommé %(pjid)s "
+"(utilisateur=%(uname)s)"
+
+#: cinder/auth/manager.py:324
+#, python-format
+msgid ""
+"Failed authorization: user %(uname)s not admin and not member of project "
+"%(pjname)s"
+msgstr ""
+"Autorisation refusée : l'utilisateur %(uname)s n'est ni admin ni membre "
+"du projet %(pjname)s"
+
+#: cinder/auth/manager.py:331 cinder/auth/manager.py:343
+#, python-format
+msgid "user.secret: %s"
+msgstr ""
+
+#: cinder/auth/manager.py:332 cinder/auth/manager.py:344
+#, python-format
+msgid "expected_signature: %s"
+msgstr ""
+
+#: cinder/auth/manager.py:333 cinder/auth/manager.py:345
+#, python-format
+msgid "signature: %s"
+msgstr ""
+
+#: cinder/auth/manager.py:335 cinder/auth/manager.py:357
+#, python-format
+msgid "Invalid signature for user %s"
+msgstr "Signature non valide pour l'utilisateur %s"
+
+#: cinder/auth/manager.py:353
+#, python-format
+msgid "host_only_signature: %s"
+msgstr ""
+
+#: cinder/auth/manager.py:449
+msgid "Must specify project"
+msgstr "Le projet doit être spécifié"
+
+#: cinder/auth/manager.py:490
+#, python-format
+msgid "Adding role %(role)s to user %(uid)s in project %(pid)s"
+msgstr "Ajout du rôle %(role)s à l'utilisateur %(uid)s pour le projet %(pid)s"
+
+#: cinder/auth/manager.py:493
+#, python-format
+msgid "Adding sitewide role %(role)s to user %(uid)s"
+msgstr "Ajout du rôle global %(role)s pour l'utilisateur %(uid)s"
+
+#: cinder/auth/manager.py:519
+#, python-format
+msgid "Removing role %(role)s from user %(uid)s on project %(pid)s"
+msgstr ""
+"Suppression du rôle %(role)s pour l'utilisateur %(uid)s dans le projet "
+"%(pid)s"
+
+#: cinder/auth/manager.py:522
+#, python-format
+msgid "Removing sitewide role %(role)s from user %(uid)s"
+msgstr "Suppression du rôle global %(role)s pour l'utilisateur %(uid)s"
+
+#: cinder/auth/manager.py:595
+#, python-format
+msgid "Created project %(name)s with manager %(manager_user)s"
+msgstr "Création du projet %(name)s ayant pour manager %(manager_user)s"
+
+#: cinder/auth/manager.py:613
+#, python-format
+msgid "modifying project %s"
+msgstr "modification du projet %s"
+
+#: cinder/auth/manager.py:625
+#, python-format
+msgid "Adding user %(uid)s to project %(pid)s"
+msgstr "Ajout de l'utilisateur %(uid)s au projet %(pid)s"
+
+#: cinder/auth/manager.py:646
+#, python-format
+msgid "Remove user %(uid)s from project %(pid)s"
+msgstr "Suppression de l'utilisateur %(uid)s du projet %(pid)s"
+
+#: cinder/auth/manager.py:676
+#, python-format
+msgid "Deleting project %s"
+msgstr "Suppression du projet %s"
+
+#: cinder/auth/manager.py:734
+#, python-format
+msgid "Created user %(rvname)s (admin: %(rvadmin)r)"
+msgstr "Utilisateur créé %(rvname)s (admin: %(rvadmin)r)"
+
+#: cinder/auth/manager.py:743
+#, python-format
+msgid "Deleting user %s"
+msgstr "Suppression de l'utilisateur %s"
+
+#: cinder/auth/manager.py:753
+#, python-format
+msgid "Access Key change for user %s"
+msgstr "Clef d'accès changée pour l'utilisateur %s"
+
+#: cinder/auth/manager.py:755
+#, python-format
+msgid "Secret Key change for user %s"
+msgstr "Clef secrète changée pour l'utilisateur %s"
+
+#: cinder/auth/manager.py:757
+#, python-format
+msgid "Admin status set to %(admin)r for user %(uid)s"
+msgstr "Statut admin changé en %(admin)r pour l'utilisateur %(uid)s"
+
+#: cinder/auth/manager.py:802
+#, python-format
+msgid "No vpn data for project %s"
+msgstr "Pas de données VPN pour le projet %s"
+
+#: cinder/cloudpipe/pipelib.py:46
+msgid "Instance type for vpn instances"
+msgstr "Type d'instance pour les instances VPN"
+
+#: cinder/cloudpipe/pipelib.py:49
+msgid "Template for cloudpipe instance boot script"
+msgstr ""
+
+#: cinder/cloudpipe/pipelib.py:52
+msgid "Network to push into openvpn config"
+msgstr "Réseau à passer à la configuration d'openvpn"
+
+#: cinder/cloudpipe/pipelib.py:55
+msgid "Netmask to push into openvpn config"
+msgstr "Masque réseau à passer à la configuration d'openvpn"
+
+#: cinder/cloudpipe/pipelib.py:107
+#, python-format
+msgid "Launching VPN for %s"
+msgstr "Démarrage du VPN pour %s"
+
+#: cinder/compute/api.py:141
+msgid "No compute host specified"
+msgstr ""
+
+#: cinder/compute/api.py:144
+#, python-format
+msgid "Unable to find host for Instance %s"
+msgstr ""
+
+#: cinder/compute/api.py:192
+#, python-format
+msgid ""
+"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
+"properties"
+msgstr ""
+
+#: cinder/compute/api.py:203
+#, python-format
+msgid "Quota exceeded for %(pid)s, metadata property key or value too long"
+msgstr ""
+
+#: cinder/compute/api.py:257
+msgid "Cannot run any more instances of this type."
+msgstr "Impossible d'exécuter davantage d'instances de ce type."
+
+#: cinder/compute/api.py:259
+#, python-format
+msgid "Can only run %s more instances of this type."
+msgstr "Vous ne pouvez exécuter que %s instances supplémentaires de ce type."
+
+#: cinder/compute/api.py:261
+#, python-format
+msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. "
+msgstr ""
+
+#: cinder/compute/api.py:310
+msgid "Creating a raw instance"
+msgstr "Création d'une instance raw"
+
+#: cinder/compute/api.py:312
+#, python-format
+msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s"
+msgstr ""
+
+#: cinder/compute/api.py:383
+#, python-format
+msgid "Going to run %s instances..."
+msgstr "Démarrage de %s instances..."
+
+#: cinder/compute/api.py:447
+#, python-format
+msgid "bdm %s"
+msgstr ""
+
+#: cinder/compute/api.py:474
+#, python-format
+msgid "block_device_mapping %s"
+msgstr ""
+
+#: cinder/compute/api.py:591
+#, python-format
+msgid "Sending create to scheduler for %(pid)s/%(uid)s's"
+msgstr ""
+
+#: cinder/compute/api.py:871
+msgid "Going to try to soft delete instance"
+msgstr "Va tenter une suppression douce de l'instance"
+
+#: cinder/compute/api.py:891
+msgid "No host for instance, deleting immediately"
+msgstr ""
+
+#: cinder/compute/api.py:939
+msgid "Going to try to terminate instance"
+msgstr "Va tenter de terminer l'instance"
+
+#: cinder/compute/api.py:977
+msgid "Going to try to stop instance"
+msgstr "Va tenter d'arrêter l'instance"
+
+#: cinder/compute/api.py:996
+msgid "Going to try to start instance"
+msgstr "Va tenter de démarrer l'instance"
+
+#: cinder/compute/api.py:1000
+#, python-format
+msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s"
+msgstr ""
+
+#: cinder/compute/api.py:1071 cinder/volume/api.py:173
+#: cinder/volume/volume_types.py:64
+#, python-format
+msgid "Searching by: %s"
+msgstr ""
+
+#: cinder/compute/api.py:1201
+#, python-format
+msgid "Image type not recognized %s"
+msgstr ""
+
+#: cinder/compute/api.py:1369
+msgid "flavor_id is None. Assuming migration."
+msgstr ""
+
+#: cinder/compute/api.py:1377
+#, python-format
+msgid ""
+"Old instance type %(current_instance_type_name)s, new instance type "
+"%(new_instance_type_name)s"
+msgstr ""
+
+#: cinder/compute/api.py:1644
+#, python-format
+msgid "multiple fixedips exist, using the first: %s"
+msgstr ""
+
+#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65
+msgid "create arguments must be positive integers"
+msgstr ""
+
+#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/compute/instance_types.py:86
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr ""
+
+#: cinder/compute/manager.py:138
+#, python-format
+msgid "check_instance_lock: decorating: |%s|"
+msgstr "check_instance_lock: décoration : |%s|"
+
+#: cinder/compute/manager.py:140
+#, python-format
+msgid ""
+"check_instance_lock: arguments: |%(self)s| |%(context)s| "
+"|%(instance_uuid)s|"
+msgstr ""
+
+#: cinder/compute/manager.py:144
+#, python-format
+msgid "check_instance_lock: locked: |%s|"
+msgstr "check_instance_lock: verrouillé : |%s|"
+
+#: cinder/compute/manager.py:146
+#, python-format
+msgid "check_instance_lock: admin: |%s|"
+msgstr "check_instance_lock: admin : |%s|"
+
+#: cinder/compute/manager.py:151
+#, python-format
+msgid "check_instance_lock: executing: |%s|"
+msgstr "check_instance_lock: exécution : |%s|"
+
+#: cinder/compute/manager.py:155
+#, python-format
+msgid "check_instance_lock: not executing |%s|"
+msgstr "check_instance_lock: ne s'exécute pas |%s|"
+
+#: cinder/compute/manager.py:201
+#, python-format
+msgid "Unable to load the virtualization driver: %s"
+msgstr ""
+
+#: cinder/compute/manager.py:223
+#, python-format
+msgid ""
+"Instance %(instance_uuid)s has been destroyed from under us while trying "
+"to set it to ERROR"
+msgstr ""
+
+#: cinder/compute/manager.py:240
+#, python-format
+msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
+msgstr ""
+
+#: cinder/compute/manager.py:245
+msgid "Rebooting instance after cinder-compute restart."
+msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, fuzzy, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "Après l'arrêt d'instances : %s" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "L'instance a déjà été crée" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." +msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." 
+msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "Redémarrage de l'instance %s" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "instance %s: création d'un instantané (snapshot)" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." +msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "instance %s: récupération" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "instance %s: dé-récupération" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" 
+msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "instance %s: mise en pause" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "instance %s: reprise après pause" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instance %s: récupération des diagnostiques" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "instance %s: suspension" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "instance %s: reprise après suspension" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "instance %s: vérrouillage" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "instance %s: déverrouillage" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "instance %s: récupération de l'état de vérouillage" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "instance %s: redémarrage du réseau" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, fuzzy, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "L'instance %(instance_id)s n'est pas en mode secours" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "Démontage de volume d'une instance inconnue %s" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." 
+msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "Ajout de console" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "Tentative de suppression d'une console non existante %(console_id)s." + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." 
+msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "Reconstruction de la configuration xvp" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "Ré-écriture de %s" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "Arrêt xvp" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "Démarrage xvp" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "Erreur au démarrage xvp : %s" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "Re-démarrage xvp" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "xvp non actif..." + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "L'utilisation d'une requête de contexte vide est dévalué" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "python-migrate n'est pas installé. Fin d'éxécution." + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "Impossible de déchiffrer la clef privée : %s" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "Impossible de déchiffrer le vecteur d'initialisation : %s" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "Impossible de déchiffrer le fichier image %(image_file)s: %(err)s" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "Hupping dnsmasq à renvoyé %s" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d est dépassé, re-démarrage de dnsmasq" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "La destruction de radvd à renvoyé %s" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "Pid %d est dépassé, re-démarrage radvd" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Démarrage de l'interface VLAN %s" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Démarrage de l'interface de Bridge %s" + +#: cinder/network/linux_net.py:1142 +#, fuzzy, python-format +msgid "Starting bridge %s " +msgstr "Démarrage de l'interface de Bridge %s" + +#: cinder/network/linux_net.py:1149 +#, fuzzy, python-format +msgid "Done starting bridge %s" +msgstr "Erreur au démarrage xvp : %s" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "réglage de l'hôte réseau" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "IP %s libérée qui n'était pas allouée" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "La somme du nombre de réseau et le début de vlan ne peut excéder 4094" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Renvoi de l'exception %s à l'appelant" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "Contexte décompacté : %s" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "%s reçu" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "Pas de méthode pour le message : %s" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "Pas de méthode pour le message : %s" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID est %s" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "Doit mettre en oeuvre un calendrier de retrait" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, fuzzy, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "L'instance %(instance_id)s n'est pas suspendue" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "Instance actives : %s" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "Après l'arrêt d'instances : %s" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "adresse de départ" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "Destination %s allouée" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "Reception par Nested %(queue)s, %(value)s" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "Nested renvoi %s" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr 
"%s Reçu" + +#: cinder/virt/connection.py:85 +msgid "Failed to open connection to the hypervisor" +msgstr "Échec lors de l'ouverture d'une connexion à l'hyperviseur" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "Tentative de suppression de filtre pour l'intance %s qui n'est pas filtrée" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" +"Doit spécifier xenapi_connection_url, xenapi_connection_username " +"(optionel), et xenapi_connection_password pour utiliser " +"connection_type=xenapi" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "Reçu exception : %s" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "instance %(instance_name)s: suppression des fichiers d'instance %(target)s" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "instance %s: re-démarrée" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "instance %s: récupérée" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "instance %s: a démarrée" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ 
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:269
+#, python-format
+msgid "instance %s spawned successfully"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:272
+#, python-format
+msgid "instance %s:not booted"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:274
+msgid "Bremetal assignment is overcommitted."
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:354
+#, python-format
+msgid "instance %s: Creating image"
+msgstr "instance %s : Création de l'image"
+
+#: cinder/virt/baremetal/proxy.py:473
+#, python-format
+msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:484
+#, python-format
+msgid ""
+"instance %(inst_name)s: ignoring error injecting data into image "
+"%(img_id)s (%(e)s)"
+msgstr ""
+"instance %(inst_name)s : l'erreur d'injection de données dans l'image "
+"%(img_id)s (%(e)s) a été ignorée"
+
+#: cinder/virt/baremetal/proxy.py:529
+#, python-format
+msgid "instance %s: starting toXML method"
+msgstr "instance %s: démarrage de la méthode toXML"
+
+#: cinder/virt/baremetal/proxy.py:531
+#, python-format
+msgid "instance %s: finished toXML method"
+msgstr "instance %s: fin d'exécution de la méthode toXML"
+
+#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815
+msgid ""
+"Cannot get the number of cpu, because this function is not implemented "
+"for this platform. This error can be safely ignored for now."
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:714
+#, python-format
+msgid "#### RLK: cpu_arch = %s "
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:746
+msgid "Updating!"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609
+#: cinder/virt/xenapi/host.py:129
+msgid "Updating host stats"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:185
+msgid "free_node...."
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:216
+#, python-format
+msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:221
+msgid "status of node is set to 0"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:232
+msgid "rootfs is already removed"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:264
+msgid "Before ping to the bare-metal node"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:275
+#, python-format
+msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:279
+#, python-format
+msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:292
+msgid "Noting to do for tilera nodes: vmlinux is in CF"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:316
+msgid "activate_node"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:330
+msgid "Node is unknown error state."
+msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "Impossible de monter le système de fichier : %s" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." +msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "Impossible de lier l'image au loopback : %s" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "Pas de device nbd libre" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "Device nbd %s n'est pas apparu" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "Connexion à libvirt: %s" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "Connexion à libvirt interrompue" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy, python-format +msgid "Instance soft rebooted successfully." +msgstr "instance %s: re-démarrée" + +#: cinder/virt/libvirt/connection.py:696 +#, fuzzy +msgid "Failed to soft reboot instance." +msgstr "Échec du redémarrage de l'instance" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "data: %(data)r, fpath: %(fpath)r" + +#: cinder/virt/libvirt/connection.py:978 +#, fuzzy +msgid "Guest does not have a console available" +msgstr "L’utilisateur n'a pas les privilèges administrateur" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "Impossible de trouver un port ouvert" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "Fonction non implémentée" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake n'a pas d'implémentation pour %s" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "Appel %(localname)s %(impl)s" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "Appel du getter %s" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake n'a pas d'implementation pour %s ou il a été appelé avec le " +"mauvais nombre d'arguments" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "Réseau non unique trouvé pour le bridge %s" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "Aucun réseau trouvé pour le bridge %s" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "Impossible de détacher le volume %s" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "VBD non trouvé dans l'instance %s" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, fuzzy, python-format +msgid "VBD %s already detached" +msgstr "le groupe %s existe déjà" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Impossible de deconnecter le VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Impossible de supprimer le VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, fuzzy, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "VBD créé %(vbd_ref)s pour VM %(vm_ref)s, VDI %(vdi_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "VBD créé %(vbd_ref)s pour VM %(vm_ref)s, VDI %(vdi_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" +"VDI créé %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on" +" %(sr_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, fuzzy, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" +"Création de l'instantané (snapshot) pour la VM %(vm_ref)s avec le label " +"'%(label)s'..." + +#: cinder/virt/xenapi/vm_utils.py:392 +#, fuzzy, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "Instantané (snapshot) créé %(template_vm_ref)s pour la VM %(vm_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "Demande de chargement à xapi de %(vdi_uuids)s en tant qu'ID %(image_id)s" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, fuzzy, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "Taille de l'image %(image)s:%(virtual_size)d" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "Copie de VDI %s vers /boot/guest sur dom0" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "Noyau/Ramdisk VDI %s détruit" + +#: cinder/virt/xenapi/vm_utils.py:895 +#, fuzzy +msgid "Failed to fetch glance image" +msgstr "Échec du redémarrage de l'instance" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Recherche du VDI %s pour le PV kernel" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format 
%(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "Le VDI %s est toujours disponible" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) état xenserver vm -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of %(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "Re-parcours de SR %s" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "VHD %(vdi_uuid)s à pour parent %(parent_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" +"L'UUID parent %(parent_uuid)s ne correspond pas au parent originel " +"%(original_parent_uuid)s, attente de coalesence..." + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "Connexion de VBD %s ... " + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." 
+msgstr "Connexion de VBD %s terminée." + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "Destruction de VBD pour la VDI %s ... " + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "Destruction de VBD pour la VDI %s terminée." + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "Exécution de pygrub sur %s" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "Kernel Xen %s trouvé" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." +msgstr "Pas de kernel Xen trouvé. Démarrage en HVM." + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "Ecriture de la table de partitionnement %s terminée." + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +#, fuzzy +msgid "Starting instance" +msgstr "Création d'une instance raw" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +#, fuzzy +msgid "Failed to spawn, rolling back" +msgstr "Échec de la suspension de l'instance" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "Injection du chemin d'accès : '%s'" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +#, fuzzy +msgid "Starting VM" +msgstr "Re-démarrage xvp" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +#, fuzzy, python-format +msgid "Finished snapshot and upload for VM" +msgstr "Fin de l'instantané et du chargement de VM %s" + +#: cinder/virt/xenapi/vmops.py:677 +#, fuzzy, python-format +msgid "Starting snapshot for VM" +msgstr "Début de création d'instantané (snapshot) pour la VM %s" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "Impossible d'attacher le volume à l'instance %s" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "Fichiers noyau/ramdisk supprimés" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +#, fuzzy +msgid "Destroying VM" +msgstr "Re-démarrage xvp" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, fuzzy, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "L'instance %(instance_id)s n'est pas suspendue" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +#, fuzzy +msgid "Injecting network info to xenstore" +msgstr "réglage de l'hôte réseau" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, fuzzy, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "Création du VIF pour la VM %(vm_ref)s, réseau %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1495 +#, fuzzy, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "Création du VIF pour la VM %(vm_ref)s, réseau %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "Erreur OpenSSL : %s" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "Impossible de créer le dépot de stockage" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "%(label)s introduit comme %(sr_ref)s." + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." 
+msgstr "Introduction de %s" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "Impossible de trouver SR du VDB %s" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "Exception %(exc)s ignorée pendant l'obtention de PBDs pour %(sr_ref)s" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "Exception %(exc)s ignorée pendant la deconnexion du PBD %(pbd)s" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "Impossible d'introduire VDI sur SR %s" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "Impossible de récuppérer l'enregistrement du VDI %s sur" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "Impossible d'introduire le VDI pour SR %s" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "Le point de montage ne peut pas être traduit : %s" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" +"Impossible de créer VDI sur SR %(sr_ref)s pour l'instance " +"%(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Impossible d'utiliser SR %(sr_ref)s pour l'instance %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Impossible d'attacher le volume à l'instance %s" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" +"Le point de montage %(mountpoint)s a été attaché à l'instance " +"%(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Impossible de trouver le volume %s" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Impossible de détacher le volume %s" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" +"Le point de montage %(mountpoint)s à été détaché de l'instance " +"%(instance_name)s" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already 
attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Le status du volume doit être disponible" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Récupération après une exécution erronée. Tentative numéro %s" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Imposible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAUX ISCSI: %s" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd n'as pas de file %s" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog n'est pas actif : %s" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "Sheepdog n'est pas actif" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Ré-exportation de %s volumes" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s : exportation évitée" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: création" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: cŕeation d'un volume logique de %(vol_size)sG" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: exportation en cours" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: crée avec succès" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "Le volume est encore attaché" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "Le volume n'est pas local à ce noeud" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: suppression de l'exportation" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: suppression" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: 
cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: supprimé avec succès" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instance %s: création d'un instantané (snapshot)" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconnexion à la queue" + +#: cinder/volume/netapp.py:159 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "\"Non trouvé\" remonté : %s" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "Impossible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Impossible de trouver une exportation iSCSI pour le volume %s" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Impossible de récupérer les méta-données pour l'IP : %s" + +#: cinder/volume/netapp.py:614 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Impossible de récupérer les méta-données pour l'IP : %s" + +#: cinder/volume/netapp.py:620 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Impossible de récupérer les méta-données pour l'IP : %s" + +#: cinder/volume/netapp.py:625 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Impossible de récupérer les méta-données pour l'IP : %s" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: 
cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "réponse %s" + +#: cinder/volume/nexenta/volume.py:96 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "Le groupe de volume %s n'existe pas" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." +msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. 
vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "Tentative de destruction d'une instance déjà détruite: %s" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "Initialisation du Consommateur d'Adaptateur pour %s" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "le sujet est %s" + +#~ msgid "message %s" +#~ msgstr "message %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." 
+#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "(%(nm)s) publication (key: %(routing_key)s) %(message)s" + +#~ msgid "Publishing to route %s" +#~ msgstr "Publication vers la route %s" + +#~ msgid "Declaring queue %s" +#~ msgstr "Déclaration de la queue %s" + +#~ msgid "Declaring exchange %s" +#~ msgstr "Déclaration de l'échange %s" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" +#~ "Rattachement de %(queue)s vers %(exchange)s" +#~ " avec la clef %(routing_key)s" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "Récupération depuis %(queue)s: %(message)s" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. 
Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "Tâche [%(name)s] %(task)s état : succès %(result)s" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "Tâche [%(name)s] %(task)s état : %(status)s %(error_info)s" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "virsh a retourné : %r" + +#~ msgid "cool, it's a device" +#~ msgstr "super, c'est un device" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "VM %s crée..." + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "VM %(instance_name)s crée en tant que %(vm_ref)s." + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "Création de VBD pour la VDI %s ... " + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "La création de VBD pour la VDI %s est terminée." + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "VBD.unplug terminé dés la première tentative." + +#~ msgid "VBD.unplug rejected: retrying..." 
+#~ msgstr "VBD.unplug refusé : nouvel essai..." + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "VBD.unplug à enfin été achevée." + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "XenAPI.Failure ignorée dans VBD.unplug: %s" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "XenAPI.Failure %s ignorée" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "instance %s: n'a pas pu être crée" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "Démarrage de la VM %s..." + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "VIF créé %(vif_ref)s pour la VM %(vm_ref)s, network %(network_ref)s." + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "Création de VBD pour VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/it/LC_MESSAGES/nova.po b/cinder/locale/it/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..08fcd9be9ba --- /dev/null +++ b/cinder/locale/it/LC_MESSAGES/nova.po @@ -0,0 +1,8210 @@ +# Italian translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-04-01 18:59+0000\n" +"Last-Translator: simone.sandri \n" +"Language-Team: Italian \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "Filename di root CA" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Nome file della chiave privata" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "Dove si conservano le chiavi" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "Dove si conserva root CA" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "Si dovrebbe usare un CA per ogni progetto?" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "Soggetto per il certificato degli utenti, %s per progetto, utente, orario" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Soggetto per il certificato dei progetti, %s per progetto, orario" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "Percorso dei flags: %s" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "E' stato riscontrato un errore sconosciuto" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "L'utente non ha i privilegi dell'amministratore" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "nessun metodo per il messaggio: %s" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "Parametri inaccettabili." 
+ +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "La richiesta non è valida." + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "E' stato ricevuto un input non valido" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "Tipo del volume non valido" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "Volume non valido" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "Impossibile sospendere l'istanza" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "Impossibile ripristinare il server" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "Impossibile riavviare l'istanza" + +#: cinder/exception.py:334 +#, fuzzy +msgid "Failed to terminate instance" +msgstr "Impossibile riavviare l'istanza" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." 
+msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." 
+msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." 
+msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." 
+msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." 
+msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "Servizio terminato che non ha entry nel database" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "Il servizio é scomparso dal database, ricreo." + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "Connessione al model server ripristinata!" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "model server é scomparso" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "Insieme di FLAGS:" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "Eccezione interna: %s" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "Prelievo %s" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Esecuzione del comando (sottoprocesso): %s" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "Il risultato é %s" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Eseguendo cmd (SSH): %s" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." +msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." 
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: 
cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, python-format +msgid "Invalid mode: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance 
%s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +msgid "Going to try to soft delete instance" +msgstr "" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +msgid "Going to try to terminate instance" +msgstr "" + +#: cinder/compute/api.py:977 +#, fuzzy +msgid "Going to try to stop instance" +msgstr "Impossibile riavviare l'istanza" + +#: cinder/compute/api.py:996 +#, fuzzy +msgid "Going to try to start instance" +msgstr "Impossibile riavviare l'istanza" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorazione: |%s|" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: bloccato: |%s|" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: esecuzione: |%s|" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: non esecuzione |%s|" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "L'istanza é stata già creata" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "Riavviando l'istanza %s" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "istanza %s: creazione snapshot in corso" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "istanza %s: in pausa" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "istanza %s: fuori pausa" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "istanza %s: ricezione diagnostiche" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "istanza %s: sospensione in corso" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "istanza %s: ripristino" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "istanza %s: bloccato" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "istanza %s: sbloccato" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "istanza %s: ripristino rete" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: cinder/compute/manager.py:1822 +#, python-format 
+msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." 
+msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." +msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Avviando l'interfaccia VLAN %s" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Avviando l'interfaccia Bridge per %s" + +#: cinder/network/linux_net.py:1142 +#, fuzzy, python-format +msgid "Starting bridge %s " +msgstr "Avviando l'interfaccia Bridge per %s" + +#: cinder/network/linux_net.py:1149 +#, python-format +msgid "Done starting bridge %s" +msgstr "" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Sollevando eccezione %s al chiamante" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "contesto decompresso: %s" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "ricevuto %s" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "nessun metodo per il messaggio: %s" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "nessun metodo per il messagggio: %s" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: cinder/virt/connection.py:85 +msgid "Failed to 
open connection to the hypervisor" +msgstr "Fallita l'apertura della connessione verso l'hypervisor" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy, python-format +msgid "Instance soft rebooted successfully." +msgstr "volume %s: creato con successo" + +#: cinder/virt/libvirt/connection.py:696 +#, fuzzy +msgid "Failed to soft reboot instance." +msgstr "Impossibile riavviare l'istanza" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +#, fuzzy +msgid "Guest does not have a console available" +msgstr "L'utente non ha i privilegi dell'amministratore" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "Sollevando NotImplemented" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "Chiamando %(localname)s %(impl)s" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "Impossibile smontare il volume %s" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "Kernel/Ramdisk VDI %s distrutti" + +#: cinder/virt/xenapi/vm_utils.py:895 +#, fuzzy +msgid "Failed to fetch glance image" +msgstr "Impossibile riavviare l'istanza" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Cercando vdi %s per kernel PV" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of %(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +#, fuzzy, python-format +msgid "Starting instance" +msgstr "Riavviando l'istanza %s" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +#, fuzzy +msgid "Failed to spawn, rolling back" +msgstr "Impossibile sospendere l'istanza" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "Impossibile montare il volume all'istanza %s" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Impossible creare il VDI su SR %(sr_ref)s per l'istanza %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Impossibile usare SR %(sr_ref)s per l'istanza %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Impossibile montare il volume all'istanza %s" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "Mountpoint %(mountpoint)s montato all'istanza %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Impossibile smontare il volume %s" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "Mountpoint %(mountpoint)s smontato dall'istanza %(instance_name)s" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid 
"already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Lo stato del volume deve essere disponibile" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: creazione in corso" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: creazione in corso per l'esportazione" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: creato con successo" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "Volume ancora collegato" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: rimuovendo" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: rimosso con successo" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, 
python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "istanza %s: creazione snapshot in corso" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Riconnesso alla coda" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/netapp.py:614 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/netapp.py:620 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/netapp.py:625 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" 
+msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "risposta %s" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." +msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." +#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "Provando a distruggere una istanza già distrutta: %s" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "Inizializzando il Consumer Adapter per %s" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "argomento é %s" + +#~ msgid "message %s" +#~ msgstr "messaggio %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." 
+#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "Pubblicando sulla route %s" + +#~ msgid "Declaring queue %s" +#~ msgstr "Dichiarando la coda %s" + +#~ msgid "Declaring exchange %s" +#~ msgstr "Dichiarando il centralino %s" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "Creata VM %s.." + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "Creata VM %(instance_name)s come %(vm_ref)s" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgstr "" + +#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "Istanza %s: esecuzione fallita..." + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgstr "" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/ja/LC_MESSAGES/nova.po b/cinder/locale/ja/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..65a92759370 --- /dev/null +++ b/cinder/locale/ja/LC_MESSAGES/nova.po @@ -0,0 +1,8196 @@ +# Japanese translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2011-08-23 11:22+0000\n" +"Last-Translator: Thierry Carrez \n" +"Language-Team: \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "ルートCAのファイル名" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "プライベートキーのファイル名" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "キーを格納するパス" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "ルートCAを格納するパス" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "プロジェクトごとにCAを使用するか否かのフラグ" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "ユーザの証明書のサブジェクト、%s はプロジェクト、ユーザ、タイムスタンプ" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "プロジェクトの証明書のサブジェクト、%s はプロジェクト、およびタイムスタンプ" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "Flags のパス: %s" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"コマンド: %(cmd)s\n" +"終了コード: %(exit_code)s\n" +"標準出力: %(stdout)r\n" +"標準エラー出力: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." 
+msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +#, fuzzy +msgid "Failed to terminate instance" +msgstr "インスタンス終了処理を開始します。" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." 
+msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." 
+msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." 
+msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "ユーザ %(uid)s はすでにグループ %(group_dn)s のメンバです。" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." 
+msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." 
+msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, fuzzy, python-format +msgid "Could not fetch image %(image)s" +msgstr "イメージをループバック %s にアタッチできません。" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "%(topic)s ノードを開始しています (バージョン %(vcs_string)s)" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "データベースにエントリの存在しないサービスを終了します。" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "サービスデータベースオブジェクトが消滅しました。再作成します。" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "モデルサーバへの接続を復旧しました。" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "モデルサーバが消滅しました。" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "FLAGSの一覧:" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "内側で発生した例外: %s" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "ファイルをフェッチ: %s" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "コマンド実行(subprocess): %s" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "コマンド実行結果: %s" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." 
+msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "コマンド(SSH)を実行: %s" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "コールバック中のデバッグ: %s" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "リンクローカルアドレスが見つかりません: %s" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "%(interface)s のローカルIPアドレスのリンクが取得できません:%(ex)s" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "不正なバックエンドです: %s" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "バックエンドは %s です。" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "__call__ を実装しなければなりません" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "利用できません" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." 
+msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "認証失敗の回数が多すぎます。" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." +msgstr "アクセスキー %(access_key)s は %(failures)d 回認証に失敗しましたので、%(lock_mins)d 分間ロックします。" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "%s の認証に失敗しました。" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "%(uname)s 用の認証リクエスト:%(pname)s)" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "アクション(action): %s" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "引数: %(key)s\t\t値: %(value)s" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "コントローラ=%(controller)s とアクション=%(action)s 用の許可されていないリクエスト" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "InstanceNotFound が発行されました: %s" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "VolumeNotFound が発行されました: %s" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "NotFound 発生: %s" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, fuzzy, python-format +msgid "QuotaError raised: %s" +msgstr "予期しないエラー発生: %s" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "予期しないエラー発生: %s" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "未知のエラーが発生しました。再度リクエストを実行してください。" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "未サポートの API リクエスト: コントローラ = %(controller)s, アクション = %(action)s" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "Create key pair: キーペア %s を作成します。" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "Delete key pair: キーペア %s を削除します。" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revoke security group ingress: セキュリティグループ許可 %s の取消" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, fuzzy, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "有効なルールを作成する為の十分なパラメータがありません" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "指定されたパラメータに該当するルールがありません。" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Authorize security group ingress: セキュリティグループ許可 %s" + +#: cinder/api/ec2/cloud.py:725 +#, fuzzy, python-format +msgid "%s - This rule already exists in group" +msgstr "指定されたルールは既にグループ %s に存在しています。" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "Create Security Group: セキュリティグループ %s を作成します。" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "グループ %s は既に存在しています。" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "Delete security group: セキュリティグループ %s を削除します。" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "Get console output: インスタンス %s のコンソール出力を取得します。" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "Create volume: %s GBのボリュームを作成します。" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "ボリューム %(volume_id)s をインスタンス %(instance_id)s のデバイス %(device)s に接続" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "Detach volume: ボリューム %s をデタッチします" + +#: cinder/api/ec2/cloud.py:959 +#, fuzzy, python-format +msgid "Detach Volume Failed." +msgstr "Detach volume: ボリューム %s をデタッチします" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "アトリビュート %s はサポートされていません。" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "Allocate address: アドレスを割り当てます。" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "Release address: アドレス %s を開放します。" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "インスタンス %(instance_id)s にアドレス %(public_ip)s を割り当て" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "Disassociate address: アドレス %s の関連付けを解除します。" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "インスタンス終了処理を開始します。" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "Reboot instance: インスタンス %r を再起動します。" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "De-registering image: イメージ %s を登録解除します。" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "イメージ %(image_location)s が ID %(image_id)s で登録されました" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "ユーザまたはグループが指定されていません。" + +#: cinder/api/ec2/cloud.py:1538 
+msgid "only group \"all\" is supported" +msgstr "グループ \"all\" のみサポートされています。" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "operation_type は add または remove の何れかである必要があります。" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "イメージ %s の公開設定を更新します。" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "エラー %s をキャッチしました。" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." +msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." 
+msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." +msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: 
cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "例外: Compute.api::pause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "例外: Compute.api::unpause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "例外: compute.api::suspend %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "例外: compute.api::resume %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "例外: Compute.api::reset_network %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "例外: Compute.api::lock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "例外: Compute.api::unlock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be specified." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, fuzzy, python-format +msgid "Invalid mode: '%s'" +msgstr "不正なバックエンドです: %s" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +#, fuzzy, python-format +msgid "Security group is still in use" +msgstr "Revoke security group ingress: セキュリティグループ許可 %s の取消" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "有効なルールを作成する為の十分なパラメータがありません" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "指定されたルールは既にグループ %s に存在しています。" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "シングルトンをインスタンス化しようとしました。" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "グループの最後のメンバーを削除しようとしました。代わりにグループ %s を削除してください。" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "ユーザ %r を検索します。" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Failed authorization: アクセスキー %s の認証に失敗しました。" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "ユーザ名 (%s) をプロジェクト名として使用します。" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "許可されません: %(pjid)s という名称のプロジェクトはありません (ユーザ=%(uname)s)" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "許可されません: ユーザ %(uname)s は管理者でもプロジェクト %(pjname)s のメンバでもありません。" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Invalid signature: ユーザ %s の署名が不正です。" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "プロジェクトを指定してください。" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "プロジェクト %(pid)s のユーザ %(uid)s にロール %(role)s を付与します。" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "サイト共通のロール %(role)s をユーザ %(uid)s に付与します。" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "プロジェクト %(pid)s のユーザ %(uid)s からロール %(role)s を削除します。" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "ユーザ %(uid)s からサイト共通のロール %(role)s を削除します。" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "プロジェクト %(name)s を管理者 %(manager_user)s で作成しました。" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "modifying project: プロジェクト %s を更新します。" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "ユーザ %(uid)s をプロジェクト %(pid)s に追加します。" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "ユーザ %(uid)s をプロジェクト %(pid)s から削除します。" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "Deleting project: プロジェクト %s を削除します。" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "ユーザ %(rvname)s を作成しました。(管理者: %(rvadmin)r)" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "Deleting user: ユーザ %s を削除します。" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "Access Key change: ユーザ %s のアクセスキーを更新します。" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "Secret Key change: ユーザ %s のシークレットキーを更新します。" + +#: cinder/auth/manager.py:757 +#, 
python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "ユーザ %(uid)s に対して管理者状態が %(admin)r に設定されました。" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "プロジェクト %s に関するvpnデータがありません。" + +#: cinder/cloudpipe/pipelib.py:46 +#, fuzzy, python-format +msgid "Instance type for vpn instances" +msgstr "Get console output: インスタンス %s のコンソール出力を取得します。" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "openvpnの設定に入れるネットワークの値" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "openvpnの設定に入れるネットマスクの値" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "%s 用のVPNを起動します。" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance %s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +#, fuzzy, python-format +msgid "Cannot run any more instances of this type." +msgstr "インスタンスのクオータを超えました。このタイプにおいてはあと %s インスタンスしか実行できません。" + +#: cinder/compute/api.py:259 +#, fuzzy, python-format +msgid "Can only run %s more instances of this type." +msgstr "インスタンスのクオータを超えました。このタイプにおいてはあと %s インスタンスしか実行できません。" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "raw instanceを生成します。" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "%s 個のインスタンスの起動を始めます…" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +#, fuzzy, python-format +msgid "Going to try to soft delete instance" +msgstr "%s を停止しようとしています" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +#, fuzzy, python-format +msgid "Going to try to terminate instance" +msgstr "%s を停止しようとしています" + +#: cinder/compute/api.py:977 +#, fuzzy, python-format +msgid "Going to try to stop instance" +msgstr "%s を停止しようとしています" + +#: cinder/compute/api.py:996 +#, fuzzy, python-format +msgid "Going to try to start instance" +msgstr "%s を停止しようとしています" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: executing: |%s|" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: not executing |%s|" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, fuzzy, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "インスタンス %s を終了した後です。" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "インスタンスは既に生成されています。" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "Rebooting instance: インスタンス %s を再起動します。" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "snapshotting: インスタンス %s のスナップショットを取得中" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "Rescuing: インスタンス %s をレスキューします。" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "Unrescuing: インスタンス %s をアンレスキューします。" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "pausing: インスタンス %s を一時停止します。" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "unpausing: インスタンス %s の一時停止を解除します。" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "retrieving diagnostics: インスタンス %s の診断情報を取得します。" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "suspending: インスタンス %s をサスペンドします。" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "resuming: インスタンス %s をレジュームします。" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "locking: インスタンス %s をロックします。" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "unlocking: インスタンス %s のロックを解除します。" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "getting locked state: インスタンス %s のロックを取得しました。" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "インスタンス %s: ネットワークをリセットします" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: 
cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "ボリュームを未知のインスタンス %s からデタッチします。" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." 
+msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "コンソールを追加しています" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "存在しないコンソール %(console_id)s を削除しようとしました" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." +msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "xvp 設定を再構築しています" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "%s を再度書き込みました" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "xvp を停止しています" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "xvp を開始しています" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "xvp の開始中にエラー: %s" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "xvp を再起動しています" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "xvp が実行されていません…" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "Request context を空とすることは非推奨です。" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "python-migrate がインストールされていません。終了します。" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "プライベートキーの復号に失敗しました: %s" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "初期化ベクタの復号に失敗しました: %s" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "イメージファイル %(image_file)s の復号に失敗しました: %(err)s" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "dnsmasqに対してhupを送信しましたが %s が発生しました。" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d は無効です。dnsmasqを再実行します。" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "radvd 停止が %s 例外を発行しました" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "Pid %d がストールしているので radvd を再実行しています…" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "VLANインタフェース %s を開始します。" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "%s 用のブリッジインタフェースを開始します。" + +#: cinder/network/linux_net.py:1142 +#, fuzzy, python-format +msgid "Starting bridge %s " +msgstr "%s 用のブリッジインタフェースを開始します。" + +#: cinder/network/linux_net.py:1149 +#, fuzzy, python-format +msgid "Done starting bridge %s" +msgstr "xvp の開始中にエラー: %s" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "ネットワークホストの設定をします。" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "リースしていないIP %s が開放されました。" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "ネットワークの数とVLANの開始番号の和は 4094 より大きくできません。" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "呼び出し元に 例外 %s を返却します。" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "context %s をアンパックしました。" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "受信: %s" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_IDは %s です。" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "予備の(fallback)スケジューラを実装する必要があります。" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "インスタンス %s は実行中です。" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "インスタンス %s を終了した後です。" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "開始アドレス" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "ターゲット %s をアロケートしました。" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "Nested received %(queue)s, %(value)s" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "ネストした戻り値: %s" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "%s を受信。" + 
+#: cinder/virt/connection.py:85 +msgid "Failed to open connection to the hypervisor" +msgstr "ハイパーバイザへの接続に失敗しました。" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "フィルタされていないインスタンス %s のフィルタ解除を試行しました" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" +"connection_type=xenapi を使用するには、以下の指定が必要です: xenapi_connection_url, " +"xenapi_connection_username (オプション), xenapi_connection_password" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "例外 %s が発生しました。" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "インスタンス %(instance_name)s: インスタンスファイル群 %(target)s を削除しています" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "インスタンス%s: 再起動しました。" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "インスタンス %s: rescued" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "インスタンス %s: 起動しました。" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "インスタンス %s のイメージを生成します。" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "インスタンス %(inst_name)s: イメージ %(img_id)s へのデータ埋め込みのエラーを無視しています (%(e)s)" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "インスタンス %s: toXML メソッドを開始。" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "インスタンス %s: toXML メソッドを完了。" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "ファイルシステム %s のマウントに失敗しました。" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." 
+msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." +msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "イメージをループバック %s にアタッチできません。" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "空きの nbd デバイスがありません" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "nbd デバイス %s が出現しません" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "libvirt %s へ接続します。" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "libvirtへの接続が切れています。" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy, python-format +msgid "Instance soft rebooted successfully." +msgstr "インスタンス%s: 再起動しました。" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "データ: %(data)r, ファイルパス: %(fpath)r" + +#: cinder/virt/libvirt/connection.py:978 +msgid "Guest does not have a console available" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "開いたポートが見つかりません" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "NotImplemented 例外を発生させます。" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake には %s が実装されていません。" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "%(localname)s %(impl)s を呼び出します。" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "getter %s をコールします。" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "xenapi.fake に %s に関する実装がないか、引数の数が誤っています。" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "ブリッジ %s に対してブリッジが複数存在します。" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "ブリッジ %s に対するネットワークが存在しません。" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "ボリューム %s を切断(detach)できません" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "インスタンス %s のVBDが見つかりません。" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, fuzzy, python-format +msgid "VBD %s already detached" +msgstr "グループ %s は既に存在しています。" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "VBD %s の unplug に失敗しました。" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "VBD %s の削除に失敗しました。" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, fuzzy, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "VM %(vm_ref)s, VDI %(vdi_ref)s 用仮想ブロックデバイス(VBD) %(vbd_ref)s を作成しました。" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "VM %(vm_ref)s, VDI %(vdi_ref)s 用仮想ブロックデバイス(VBD) %(vbd_ref)s を作成しました。" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" +"%(sr_ref)s 上に VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, " +"%(read_only)s) を作成しました。" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, fuzzy, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "ラベル '%(label)s' 付き VM %(vm_ref)s のスナップショットを作成しています…" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, fuzzy, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "VM %(vm_ref)s からスナップショット %(template_vm_ref)s を作成しました。" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "ID %(image_id)s として %(vdi_uuids)s のアップロードの為に xapi を問い合わせしています" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, fuzzy, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "イメージ %(image)s のサイズ:%(virtual_size)d" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "ドメイン0 上の /boot/guest に VDI %s をコピー中" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "カーネル/RAMディスク VDI %s が削除されました" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "PV kernelのvdi %s を取得します。" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "VDI %s 
は依然として存在しています。" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver の vm state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi の power_state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of %(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "SR %s を再スキャンします。" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "VHD %(vdi_uuid)s の親は %(parent_ref)s です" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "親 %(parent_uuid)s が元々の親 %(original_parent_uuid)s と一致しません。作成を待機しています…" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "VBD %s を接続しています… " + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." 
+msgstr "仮想ブロックデバイス(VBD) %s の接続が完了しました。" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "VDI %s 用の仮想ブロックデバイス(VBD)を削除しています… " + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "VDI %s 用の仮想ブロックデバイス(VBD)の削除が完了しました。" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "%s に対して pygrub を実行しています" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "Xen Kernel %s が見つかりました。" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." +msgstr "" +"No Xen kernel found. Booting HVM.\r\n" +"Xen 用カーネルが見つかりません。完全仮想化モード(HVM)で起動しています。" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "パーティションテーブル %s の書き込みが完了しました。" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +#, fuzzy +msgid "Starting instance" +msgstr "raw instanceを生成します。" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "ファイルパス '%s' を埋め込んでいます" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +#, fuzzy +msgid "Starting VM" +msgstr "xvp を再起動しています" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +#, fuzzy, python-format +msgid "Finished snapshot and upload for VM" +msgstr "VM %s のスナップショットとアップロードが完了しました。" + +#: cinder/virt/xenapi/vmops.py:677 +#, fuzzy, python-format +msgid "Starting snapshot for VM" +msgstr "VM %s に対するスナップショットを開始します。" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "インスタンス %s にボリュームを接続(attach)できません。" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "カーネル/RAMディスクファイルが削除されました" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +#, fuzzy +msgid "Destroying VM" +msgstr "xvp を再起動しています" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +#, fuzzy +msgid "Injecting network info to xenstore" +msgstr "ネットワークホストの設定をします。" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, fuzzy, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "VM %(vm_ref)s, network %(network_ref)s 用仮想インターフェース(VIF)を作成しています。" + +#: cinder/virt/xenapi/vmops.py:1495 +#, fuzzy, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "VM %(vm_ref)s, network %(network_ref)s 用仮想インターフェース(VIF)を作成しています。" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "OpenSSL エラー: %s" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "Storage Repository を作成できません。" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "%(sr_ref)s として %(label)s を導入しました" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." 
+msgstr "%s を introduce します…" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "VBD %s から SRを取得できません。" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "%(sr_ref)s 用の物理ブロックデバイス(PBD)取得時に例外 %(exc)s を無視しています" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "物理ブロックデバイス(PBD) %(pbd)s の取り外し時に例外 %(exc)s を無視しています" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "SR %s のVDIのintroduceができません。" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "VDI %s のレコードを取得できません。" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "SR %s のVDIをintroduceできません。" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "マウントポイントを変換できません。 %s" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "インスタンス %(instance_name)s 用のSR %(sr_ref)s における VDI を作成できません" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "インスタンス %(instance_name)s 用のSR %(sr_ref)s が使用できません" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "インスタンス %s にボリュームを接続(attach)できません。" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "インスタンス %(instance_name)s にマウントポイント %(mountpoint)s を接続(attach)しました" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "ボリューム切断: %(instance_name)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "ボリューム %s の存在が確認できません。" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "ボリューム %s を切断(detach)できません" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "インスタンス %(instance_name)s からマウントポイント %(mountpoint)s を切断(detach)しました" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid 
"must be available" +msgstr "" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "ボリュームのステータス(status)は available でなければなりません。" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "実行失敗からリカバリーします。%s 回目のトライ。" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "偽のISCSI: %s" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd にプール %s がありません。" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog が動作していません: %s" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "Sheepdog が機能していません" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "%s 個のボリュームを再エクスポートします。" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "ボリューム %s のエキスポートをスキップします。" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "ボリューム%sを作成します。" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "ボリューム %(vol_name)s: サイズ %(vol_size)sG のlvを作成します。" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "ボリューム %s をエクスポートします。" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "ボリューム %s の作成に成功しました。" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "ボリュームはアタッチされたままです。" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "ボリュームはこのノードのローカルではありません。" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "ボリューム %s のエクスポートを解除します。" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "ボリューム %s を削除します。" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "ボリューム %s の削除に成功しました。" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: 
cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "snapshotting: インスタンス %s のスナップショットを取得中" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +#, fuzzy +msgid "Connected to DFM server" +msgstr "キューに再接続しました。" + +#: cinder/volume/netapp.py:159 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "NotFound 発生: %s" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "ボリューム %s 用の iSCSI エクスポートが見つかりません" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/netapp.py:614 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/netapp.py:620 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/netapp.py:625 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" 
+msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "応答 %s" + +#: cinder/volume/nexenta/volume.py:96 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "ボリュームグループ%sが存在しません。" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." +msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "既に消去済みのインスタンス%sを消去しようとしました。" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "%sのアダプターコンシューマー(Adapter Consumer)を初期化しています。" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "topic は %s です。" + +#~ msgid "message %s" +#~ msgstr "メッセージ %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "(%(nm)s) 公開 (キー: %(routing_key)s) %(message)s" + +#~ msgid "Publishing to route %s" +#~ msgstr "ルート %s へパブリッシュ" + +#~ msgid "Declaring queue %s" +#~ msgstr "queue %s の宣言" + +#~ msgid "Declaring exchange %s" +#~ msgstr "exchange %s の宣言" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "キー %(routing_key)s 付きで %(exchange)s に %(queue)s をバインドしています" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "%(queue)s から取得しています: %(message)s" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "タスク [%(name)s] %(task)s 状態: 成功 %(result)s" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "タスク [%(name)s] %(task)s 状態: %(status)s %(error_info)s" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "virsh の出力: %r" + +#~ msgid "cool, it's a device" +#~ msgstr "デバイスです。" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "VM %s を作成します。" + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "%(vm_ref)s として VM %(instance_name)s を作成しています" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "VDI %s 用に VBD を作成しています… " + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "VDI %s 用 VBD の作成が完了しました。" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "VBD.unplug は1回目で成功しました。" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "VBD.unplug が拒否されました: 再試行しています…" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "VBD.unplug は最終的に成功しました。" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "VBD.unplug 中の XenAPI.Failure を無視しています: %s" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "XenAPI.Failure %s を無視しています" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "インスタンス %s: 起動に失敗しました" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "VM %s を開始します…" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "VM %(vm_ref)s, network %(network_ref)s 用 VIF %(vif_ref)s を作成しました。" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "VM %(vm_ref)s, VDI %(vdi_ref)s 用 VBD を作成しています… " + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. 
Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/ko/LC_MESSAGES/nova.po b/cinder/locale/ko/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..0c934a398b1 --- /dev/null +++ b/cinder/locale/ko/LC_MESSAGES/nova.po @@ -0,0 +1,8207 @@ +# Korean translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2011-12-16 04:42+0000\n" +"Last-Translator: Zhongyue Luo \n" +"Language-Team: Korean \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "CA 루트의 파일이름" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "비밀키의 파일명" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "키를 저장하는 경로" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "CA 루트를 저장하는 경로" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "각 프로젝트마다 CA를 사용하시겠습니까?" 
+ +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "플래그 경로: %s" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "명령 실행도중 예측하지 못한 에러가 발생했습니다" + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +msgid "Failed to terminate instance" +msgstr "" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." 
+msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." 
+msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." 
+msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." 
+msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" 
+msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041
+#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533
+#, python-format
+msgid "attribute not supported: %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1107
+#, python-format
+msgid "vol = %s\n"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1255
+msgid "Allocate address"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1267
+#, python-format
+msgid "Release address %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1272
+#, python-format
+msgid "Associate address %(public_ip)s to instance %(instance_id)s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1282
+#, python-format
+msgid "Disassociate address %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1308
+msgid "Image must be available"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1329
+msgid "Going to start terminating instances"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1343
+#, python-format
+msgid "Reboot instance %r"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1354
+msgid "Going to stop instances"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1365
+msgid "Going to start instances"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1455
+#, python-format
+msgid "De-registering image %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1471
+msgid "imageLocation is required"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1490
+#, python-format
+msgid "Registered image %(image_location)s with id %(image_id)s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1536
+msgid "user or group not specified"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1538
+msgid "only group \"all\" is supported"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1540
+msgid "operation_type must be add or remove"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1542
+#, python-format
+msgid "Updating image %s publicity"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1555
+#, python-format
+msgid "Not allowed to modify attributes for image %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1603
+#, python-format
+msgid "Couldn't stop instance within %d sec"
+msgstr ""
+
+#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:43
+#, python-format
+msgid "Caught error: %s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:94
+msgid "Must specify an ExtensionManager class"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:105
+#, python-format
+msgid "Extended resource: %s"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:130
+#, python-format
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:135
+#, python-format
+msgid "Extension %(ext_name)s extending resource: %(collection)s"
+msgstr ""
+
+#: cinder/api/openstack/auth.py:90
+#, python-format
+msgid "%(user_id)s could not be found with token '%(token)s'"
+msgstr ""
+
+#: cinder/api/openstack/auth.py:134
+#, python-format
+msgid "%(user_id)s must be an admin or a member of %(project_id)s"
+msgstr ""
+
+#: cinder/api/openstack/auth.py:152
+msgid "Authentication requests must be made against a version root (e.g. /v2)."
+msgstr ""
+
+#: cinder/api/openstack/auth.py:167
+#, python-format
+msgid "Could not find %s in request."
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:527
+#, python-format
+msgid "Bad personality format: missing %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:530
+msgid "Bad personality format"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:535
+#, python-format
+msgid "Personality content for %s cannot be decoded"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:550
+#, python-format
+msgid "Bad networks format: network uuid is not in proper format (%s)"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:559
+#, python-format
+msgid "Invalid fixed IP address (%s)"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:566
+#, python-format
+msgid "Duplicate networks (%s) are not allowed"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:572
+#, python-format
+msgid "Bad network format: missing %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:575
+msgid "Bad networks format"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:587
+msgid "Userdata content cannot be decoded"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:594
+msgid "accessIPv4 is not in proper IPv4 format"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:601
+msgid "accessIPv6 is not in proper IPv6 format"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:633
+msgid "Server name is not defined"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:675
+#: cinder/api/openstack/compute/servers.py:740
+msgid "Invalid flavorRef provided."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:737
+msgid "Cannot find requested image"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:743
+msgid "Invalid key_name provided."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:829
+#: cinder/api/openstack/compute/servers.py:849
+msgid "Instance has not been resized."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:835
+#, python-format
+msgid "Error in confirm-resize %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:855
+#, python-format
+msgid "Error in revert-resize %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:868
+msgid "Argument 'type' for reboot is not HARD or SOFT"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:872
+msgid "Missing argument 'type' for reboot"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:885
+#, python-format
+msgid "Error in reboot %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:897
+msgid "Unable to locate requested flavor."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:900
+msgid "Resize requires a change in size."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:924
+msgid "Malformed server entity"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:931
+msgid "Missing imageRef attribute"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:940
+msgid "Invalid imageRef provided."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:949
+msgid "Missing flavorRef attribute"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:962
+msgid "No adminPass was specified"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:966
+#: cinder/api/openstack/compute/servers.py:1144
+msgid "Invalid adminPass"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:980
+msgid "Unable to parse metadata key/value pairs."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:993
+msgid "Resize request has invalid 'flavorRef' attribute."
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: 
cinder/api/openstack/compute/contrib/server_start_stop.py:38
+msgid "Instance not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:272
+msgid "host and block_migration must be specified."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/admin_actions.py:284
+#, python-format
+msgid "Live migration of instance %(id)s to host %(host)s failed"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:76
+#, python-format
+msgid ""
+"Cannot create aggregate with name %(name)s and availability zone "
+"%(avail_zone)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:88
+#, python-format
+msgid "Cannot show aggregate: %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:114
+#, python-format
+msgid "Cannot update aggregate: %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:126
+#, python-format
+msgid "Cannot delete aggregate: %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:139
+#, python-format
+msgid "Aggregates do not have %s action"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:152
+#: cinder/api/openstack/compute/contrib/aggregates.py:158
+#, python-format
+msgid "Cannot add host %(host)s to aggregate %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:171
+#: cinder/api/openstack/compute/contrib/aggregates.py:175
+#, python-format
+msgid "Cannot remove host %(host)s from aggregate %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/aggregates.py:195
+#, python-format
+msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/certificates.py:75
+msgid "Only root certificate can be retrieved."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/cloudpipe.py:146
+msgid ""
+"Unable to claim IP for VPN instances, ensure they aren't running, and try "
+"again in a few minutes"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/consoles.py:44
+msgid "Missing type specification"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/consoles.py:56
+msgid "Invalid type specification"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/disk_config.py:44
+#, python-format
+msgid "%s must be either 'MANUAL' or 'AUTO'."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77
+#: cinder/api/openstack/compute/contrib/extended_status.py:61
+msgid "Server not found."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/flavorextradata.py:61
+#: cinder/api/openstack/compute/contrib/flavorextradata.py:91
+msgid "Flavor not found."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49
+#: cinder/api/openstack/compute/contrib/volumetypes.py:158
+msgid "No Request Body"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:159
+#, python-format
+msgid "No more floating ips in pool %s."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:161
+msgid "No more floating ips available."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:201
+#: cinder/api/openstack/compute/contrib/floating_ips.py:230
+#: cinder/api/openstack/compute/contrib/security_groups.py:571
+#: cinder/api/openstack/compute/contrib/security_groups.py:604
+msgid "Missing parameter dict"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:204
+#: cinder/api/openstack/compute/contrib/floating_ips.py:233
+msgid "Address not specified"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:213
+msgid "No fixed ips associated with instance"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/floating_ips.py:216
+msgid "Associating floating ip failed"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:144
+#, python-format
+msgid "Invalid status: '%s'"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:148
+#, python-format
+msgid "Invalid mode: '%s'"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:152
+#, python-format
+msgid "Invalid update setting: '%s'"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:170
+#, python-format
+msgid "Putting host %(host)s in maintenance mode %(mode)s."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:181
+#, python-format
+msgid "Setting host %(host)s to %(state)s."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:230
+msgid "Describe-resource is admin-only functionality"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/hosts.py:238
+msgid "Host not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:70
+msgid "Keypair name contains unsafe characters"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:95
+msgid "Keypair name must be between 1 and 255 characters long"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/keypairs.py:100
+#, python-format
+msgid "Key pair '%s' already exists."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/multinic.py:52
+msgid "Missing 'networkId' argument for addFixedIp"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/multinic.py:68
+msgid "Missing 'address' argument for removeFixedIp"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/multinic.py:77
+#, python-format
+msgid "Unable to find address %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:62
+#, python-format
+msgid "Network does not have %s action"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:70
+#, python-format
+msgid "Disassociating network with id %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:74
+#: cinder/api/openstack/compute/contrib/networks.py:91
+#: cinder/api/openstack/compute/contrib/networks.py:101
+msgid "Network not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:87
+#, python-format
+msgid "Showing network with id %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/networks.py:97
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41
+msgid "Malformed scheduler_hints attribute"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:222
+msgid "Security group id should be an integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:243
+msgid "Security group is still in use"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:295
+#, python-format
+msgid "Security group %s already exists"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:315
+#, python-format
+msgid "Security group %s is not a string or unicode"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:318
+#, python-format
+msgid "Security group %s cannot be empty."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:321
+#, python-format
+msgid "Security group %s should not be longer than 255 characters."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:348
+msgid "Parent group id is not an integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:351
+#, python-format
+msgid "Security group (%s) not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:369
+msgid "Not enough parameters to build a valid rule."
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:376
+#, python-format
+msgid "This rule already exists in group %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:414
+msgid "Parent or group id is not an integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:507
+msgid "Rule id is not an integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:510
+#, python-format
+msgid "Rule (%s) not found"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:574
+#: cinder/api/openstack/compute/contrib/security_groups.py:607
+msgid "Security group not specified"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:578
+#: cinder/api/openstack/compute/contrib/security_groups.py:611
+msgid "Security group name cannot be empty"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/server_start_stop.py:45
+#, python-format
+msgid "start instance %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/server_start_stop.py:54
+#, python-format
+msgid "stop instance %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:73
+#: cinder/api/openstack/volume/volumes.py:106
+#, python-format
+msgid "vol=%s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:146
+#: cinder/api/openstack/volume/volumes.py:184
+#, python-format
+msgid "Delete volume with id: %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:329
+#, python-format
+msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:481
+#: cinder/api/openstack/volume/snapshots.py:110
+#, python-format
+msgid "Delete snapshot with id: %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:524
+#: cinder/api/openstack/volume/snapshots.py:150
+#, python-format
+msgid "Create snapshot from volume %s"
+msgstr ""
+
+#: cinder/auth/fakeldap.py:33
+msgid "Attempted to instantiate singleton"
+msgstr ""
+
+#: cinder/auth/ldapdriver.py:650
+#, python-format
+msgid ""
+"Attempted to remove the last member of a group. Deleting the group at %s "
+"instead."
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance 
%s"
+msgstr ""
+
+#: cinder/compute/api.py:192
+#, python-format
+msgid ""
+"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
+"properties"
+msgstr ""
+
+#: cinder/compute/api.py:203
+#, python-format
+msgid "Quota exceeded for %(pid)s, metadata property key or value too long"
+msgstr ""
+
+#: cinder/compute/api.py:257
+msgid "Cannot run any more instances of this type."
+msgstr ""
+
+#: cinder/compute/api.py:259
+#, python-format
+msgid "Can only run %s more instances of this type."
+msgstr ""
+
+#: cinder/compute/api.py:261
+#, python-format
+msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. "
+msgstr ""
+
+#: cinder/compute/api.py:310
+msgid "Creating a raw instance"
+msgstr ""
+
+#: cinder/compute/api.py:312
+#, python-format
+msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s"
+msgstr ""
+
+#: cinder/compute/api.py:383
+#, python-format
+msgid "Going to run %s instances..."
+msgstr ""
+
+#: cinder/compute/api.py:447
+#, python-format
+msgid "bdm %s"
+msgstr ""
+
+#: cinder/compute/api.py:474
+#, python-format
+msgid "block_device_mapping %s"
+msgstr ""
+
+#: cinder/compute/api.py:591
+#, python-format
+msgid "Sending create to scheduler for %(pid)s/%(uid)s"
+msgstr ""
+
+#: cinder/compute/api.py:871
+msgid "Going to try to soft delete instance"
+msgstr ""
+
+#: cinder/compute/api.py:891
+msgid "No host for instance, deleting immediately"
+msgstr ""
+
+#: cinder/compute/api.py:939
+msgid "Going to try to terminate instance"
+msgstr ""
+
+#: cinder/compute/api.py:977
+msgid "Going to try to stop instance"
+msgstr ""
+
+#: cinder/compute/api.py:996
+msgid "Going to try to start instance"
+msgstr ""
+
+#: cinder/compute/api.py:1000
+#, python-format
+msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s)"
+msgstr ""
+
+#: cinder/compute/api.py:1071 cinder/volume/api.py:173
+#: cinder/volume/volume_types.py:64
+#, python-format
+msgid "Searching by: %s"
+msgstr ""
+
+#: cinder/compute/api.py:1201
+#, python-format
+msgid "Image type not recognized %s"
+msgstr ""
+
+#: cinder/compute/api.py:1369
+msgid "flavor_id is None. Assuming migration."
+msgstr ""
+
+#: cinder/compute/api.py:1377
+#, python-format
+msgid ""
+"Old instance type %(current_instance_type_name)s, new instance type "
+"%(new_instance_type_name)s"
+msgstr ""
+
+#: cinder/compute/api.py:1644
+#, python-format
+msgid "multiple fixed ips exist, using the first: %s"
+msgstr ""
+
+#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65
+msgid "create arguments must be positive integers"
+msgstr ""
+
+#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: cinder/compute/instance_types.py:86
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr ""
+
+#: cinder/compute/manager.py:138
+#, python-format
+msgid "check_instance_lock: decorating: |%s|"
+msgstr "check_instance_lock: decorating: |%s|"
+
+#: cinder/compute/manager.py:140
+#, python-format
+msgid ""
+"check_instance_lock: arguments: |%(self)s| |%(context)s| "
+"|%(instance_uuid)s|"
+msgstr ""
+
+#: cinder/compute/manager.py:144
+#, python-format
+msgid "check_instance_lock: locked: |%s|"
+msgstr "check_instance_lock: locked: |%s|"
+
+#: cinder/compute/manager.py:146
+#, python-format
+msgid "check_instance_lock: admin: |%s|"
+msgstr "check_instance_lock: admin: |%s|"
+
+#: cinder/compute/manager.py:151
+#, python-format
+msgid "check_instance_lock: executing: |%s|"
+msgstr "check_instance_lock: executing: |%s|"
+
+#: cinder/compute/manager.py:155
+#, python-format
+msgid "check_instance_lock: not executing |%s|"
+msgstr "check_instance_lock: not executing |%s|"
+
+#: cinder/compute/manager.py:201
+#, python-format
+msgid "Unable to load the virtualization driver: %s"
+msgstr ""
+
+#: cinder/compute/manager.py:223
+#, python-format
+msgid ""
+"Instance %(instance_uuid)s has been destroyed from under us while trying "
+"to set it to ERROR"
+msgstr ""
+
+#: cinder/compute/manager.py:240
+#, python-format
+msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
+msgstr ""
+
+#: cinder/compute/manager.py:245
+msgid "Rebooting instance after cinder-compute restart."
+msgstr ""
+
+#: cinder/compute/manager.py:255
+msgid "Hypervisor driver does not support firewall rules"
+msgstr ""
+
+#: cinder/compute/manager.py:260
+msgid "Checking state"
+msgstr ""
+
+#: cinder/compute/manager.py:329
+#, python-format
+msgid "Setting up bdm %s"
+msgstr ""
+
+#: cinder/compute/manager.py:400
+#, python-format
+msgid "Instance %s already deleted from database. Attempting forceful vm deletion"
+msgstr ""
+
+#: cinder/compute/manager.py:406
+#, python-format
+msgid "Exception encountered while terminating the instance %s"
+msgstr ""
+
+#: cinder/compute/manager.py:444
+#, python-format
+msgid "Instance %s not found."
+msgstr ""
+
+#: cinder/compute/manager.py:480
+msgid "Instance has already been created"
+msgstr "인스턴스가 이미 생성되었습니다"
+
+#: cinder/compute/manager.py:523
+#, python-format
+msgid ""
+"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
+"allowed_size_bytes=%(allowed_size_bytes)d"
+msgstr ""
+
+#: cinder/compute/manager.py:528
+#, python-format
+msgid ""
+"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
+"size %(allowed_size_bytes)d"
+msgstr ""
+
+#: cinder/compute/manager.py:538
+msgid "Starting instance..."
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "인스턴스 %s를 재부팅합니다" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "인스턴스 %s: 스냅샷 저장중" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr ""
+
+#: cinder/compute/manager.py:1064
+msgid "Error setting admin password"
+msgstr ""
+
+#: cinder/compute/manager.py:1079
+#, python-format
+msgid ""
+"trying to inject a file into a non-running instance: %(instance_uuid)s "
+"(state: %(current_power_state)s expected: %(expected_state)s)"
+msgstr ""
+
+#: cinder/compute/manager.py:1084
+#, python-format
+msgid "instance %(instance_uuid)s: injecting file to %(path)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1098
+#, python-format
+msgid ""
+"trying to update agent on a non-running instance: %(instance_uuid)s "
+"(state: %(current_power_state)s expected: %(expected_state)s)"
+msgstr ""
+
+#: cinder/compute/manager.py:1103
+#, python-format
+msgid "instance %(instance_uuid)s: updating agent to %(url)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1116
+#, python-format
+msgid "instance %s: rescuing"
+msgstr ""
+
+#: cinder/compute/manager.py:1141
+#, python-format
+msgid "instance %s: unrescuing"
+msgstr ""
+
+#: cinder/compute/manager.py:1270
+msgid "destination same as source!"
+msgstr ""
+
+#: cinder/compute/manager.py:1287
+#, python-format
+msgid "instance %s: migrating"
+msgstr ""
+
+#: cinder/compute/manager.py:1471
+#, python-format
+msgid "instance %s: pausing"
+msgstr ""
+
+#: cinder/compute/manager.py:1489
+#, python-format
+msgid "instance %s: unpausing"
+msgstr ""
+
+#: cinder/compute/manager.py:1525
+#, python-format
+msgid "instance %s: retrieving diagnostics"
+msgstr ""
+
+#: cinder/compute/manager.py:1534
+#, python-format
+msgid "instance %s: suspending"
+msgstr ""
+
+#: cinder/compute/manager.py:1556
+#, python-format
+msgid "instance %s: resuming"
+msgstr ""
+
+#: cinder/compute/manager.py:1579
+#, python-format
+msgid "instance %s: locking"
+msgstr ""
+
+#: cinder/compute/manager.py:1588
+#, python-format
+msgid "instance %s: unlocking"
+msgstr ""
+
+#: cinder/compute/manager.py:1596
+#, python-format
+msgid "instance %s: getting locked state"
+msgstr ""
+
+#: cinder/compute/manager.py:1606
+#, python-format
+msgid "instance %s: reset network"
+msgstr ""
+
+#: cinder/compute/manager.py:1614
+#, python-format
+msgid "instance %s: inject network info"
+msgstr ""
+
+#: cinder/compute/manager.py:1618
+#, python-format
+msgid "network_info to inject: |%s|"
+msgstr ""
+
+#: cinder/compute/manager.py:1655
+#, python-format
+msgid "instance %s: getting vnc console"
+msgstr ""
+
+#: cinder/compute/manager.py:1685
+#, python-format
+msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1703
+#, python-format
+msgid ""
+"instance %(instance_uuid)s: attaching volume %(volume_id)s to "
+"%(mountpoint)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1705
+#, python-format
+msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1714
+#, python-format
+msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing"
+msgstr ""
+
+#: cinder/compute/manager.py:1724
+#, python-format
+msgid "Attach failed %(mountpoint)s, removing"
+msgstr ""
+
+#: cinder/compute/manager.py:1752
+#, python-format
+msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
+msgstr ""
+
+#: cinder/compute/manager.py:1756
+#, python-format
+msgid "Detaching volume from unknown instance %s"
+msgstr ""
+
+#: cinder/compute/manager.py:1822
+#, python-format
+msgid ""
+"Creating tmpfile %s to notify other compute nodes that they should "
+"mount the same storage."
+msgstr ""
+
+#: cinder/compute/manager.py:1884
+msgid "Instance has no volume."
+msgstr ""
+
+#: cinder/compute/manager.py:1916
+#, python-format
+msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d for %(hostname)s."
+msgstr ""
+
+#: cinder/compute/manager.py:1973
+#, python-format
+msgid "Pre live migration failed at %(dest)s"
+msgstr ""
+
+#: cinder/compute/manager.py:2000
+msgid "post_live_migration() has started."
+msgstr ""
+
+#: cinder/compute/manager.py:2030
+msgid "No floating_ip found"
+msgstr ""
+
+#: cinder/compute/manager.py:2038
+msgid "No floating_ip found."
+msgstr ""
+
+#: cinder/compute/manager.py:2040
+#, python-format
+msgid ""
+"Live migration: Unexpected error: cannot inherit floating ip.\n"
+"%(e)s"
+msgstr ""
+
+#: cinder/compute/manager.py:2073
+#, python-format
+msgid "Migration of instance to %(dest)s finished successfully."
+msgstr ""
+
+#: cinder/compute/manager.py:2075
+msgid ""
+"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
+"with matching name.\" This error can be safely ignored."
+msgstr ""
+
+#: cinder/compute/manager.py:2090
+msgid "Post operation of migration started"
+msgstr ""
+
+#: cinder/compute/manager.py:2226
+#, python-format
+msgid "Updated the info_cache for instance %s"
+msgstr ""
+
+#: cinder/compute/manager.py:2255
+msgid "Updating bandwidth usage cache"
+msgstr ""
+
+#: cinder/compute/manager.py:2277
+msgid "Updating host status"
+msgstr ""
+
+#: cinder/compute/manager.py:2305
+#, python-format
+msgid ""
+"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
+"the hypervisor."
+msgstr ""
+
+#: cinder/compute/manager.py:2331
+#, python-format
+msgid ""
+"During the sync_power process the instance %(uuid)s has moved from host "
+"%(src)s to host %(dst)s"
+msgstr ""
+
+#: cinder/compute/manager.py:2344
+#, python-format
+msgid ""
+"Instance %s is in the process of migrating to this host. Wait for the "
+"next sync_power cycle before setting power state to NOSTATE"
+msgstr ""
+
+#: cinder/compute/manager.py:2350
+msgid ""
+"Instance found in database but not known by hypervisor. Setting power "
+"state to NOSTATE"
+msgstr ""
+
+#: cinder/compute/manager.py:2380
+msgid "FLAGS.reclaim_instance_interval <= 0, skipping..."
+msgstr ""
+
+#: cinder/compute/manager.py:2392
+msgid "Reclaiming deleted instance"
+msgstr ""
+
+#: cinder/compute/manager.py:2458
+#, python-format
+msgid ""
+"Detected instance with name label '%(name)s' which is marked as DELETED "
+"but still present on host."
+msgstr ""
+
+#: cinder/compute/manager.py:2465
+#, python-format
+msgid ""
+"Destroying instance with name label '%(name)s' which is marked as DELETED"
+" but still present on host."
+msgstr ""
+
+#: cinder/compute/manager.py:2472
+#, python-format
+msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action"
+msgstr ""
+
+#: cinder/compute/manager.py:2542
+#, python-format
+msgid ""
+"Aggregate %(aggregate_id)s: unrecoverable state during operation on "
+"%(host)s"
+msgstr ""
+
+#: cinder/compute/utils.py:142
+msgid "v4 subnets are required for legacy nw_info"
+msgstr ""
+
+#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70
+msgid "Adding console"
+msgstr ""
+
+#: cinder/console/manager.py:97
+#, python-format
+msgid "Tried to remove non-existent console %(console_id)s."
+msgstr ""
+
+#: cinder/console/vmrc_manager.py:122
+#, python-format
+msgid "Tried to remove non-existent console %(console_id)s."
+msgstr ""
+
+#: cinder/console/vmrc_manager.py:125
+#, python-format
+msgid "Removing console %(console_id)s."
+msgstr ""
+
+#: cinder/console/xvp.py:98
+msgid "Rebuilding xvp conf"
+msgstr ""
+
+#: cinder/console/xvp.py:116
+#, python-format
+msgid "Re-wrote %s"
+msgstr ""
+
+#: cinder/console/xvp.py:121
+msgid "Stopping xvp"
+msgstr ""
+
+#: cinder/console/xvp.py:134
+msgid "Starting xvp"
+msgstr ""
+
+#: cinder/console/xvp.py:141
+#, python-format
+msgid "Error starting xvp: %s"
+msgstr ""
+
+#: cinder/console/xvp.py:144
+msgid "Restarting xvp"
+msgstr ""
+
+#: cinder/console/xvp.py:146
+msgid "xvp not running..."
+msgstr ""
+
+#: cinder/consoleauth/manager.py:63
+#, python-format
+msgid "Deleting Expired Token: (%s)"
+msgstr ""
+
+#: cinder/consoleauth/manager.py:75
+#, python-format
+msgid "Received Token: %(token)s, %(token_dict)s"
+msgstr ""
+
+#: cinder/consoleauth/manager.py:79
+#, python-format
+msgid "Checking Token: %(token)s, %(token_valid)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:57
+msgid "Use of empty request context is deprecated"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:198
+#, python-format
+msgid "Unrecognized read_deleted value '%s'"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551
+#, python-format
+msgid "No ComputeNode for %(host)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045
+#, python-format
+msgid "No backend config with id %(sm_backend_id)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:4103
+#, python-format
+msgid "No sm_flavor called %(sm_flavor)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/api.py:4147
+#, python-format
+msgid "No sm_volume with id %(volume_id)s"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migration.py:66
+msgid "python-migrate is not installed. Exiting."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migration.py:78
+msgid "version should be an integer"
+msgstr ""
+
+#: cinder/db/sqlalchemy/session.py:137
+#, python-format
+msgid "SQL connection failed. %s attempts left."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48
+msgid "interface column not added to networks table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80
+#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54
+#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61
+#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48
+#, python-format
+msgid "Table |%s| not created!"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87
+msgid "VIF column not added to fixed_ips table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97
+#, python-format
+msgid "join list for moving mac_addresses |%s|"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39
+#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60
+#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61
+#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99
+msgid "foreign key constraint couldn't be added"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58
+msgid "foreign key constraint couldn't be dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34
+msgid "priority column not added to networks table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41
+#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42
+#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56
+#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68
+msgid "foreign key constraint couldn't be removed"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34
+msgid "progress column not added to instances table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97
+#, python-format
+msgid ""
+"Could not cast flavorid to integer: %s. Set flavorid to an integer-like "
+"string to downgrade."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69
+msgid "instance_info_caches table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41
+msgid "progress column not added to compute_nodes table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76
+msgid "dns_domains table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60
+msgid "quota_classes table not dropped"
+msgstr ""
+
+#: cinder/image/glance.py:147
+msgid "Connection error contacting glance server, retrying"
+msgstr ""
+
+#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104
+msgid "Maximum attempts reached"
+msgstr ""
+
+#: cinder/image/glance.py:278
+#, python-format
+msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr ""
+
+#: cinder/network/linux_net.py:166
+#, python-format
+msgid "Attempted to remove chain %s which does not exist"
+msgstr ""
+
+#: cinder/network/linux_net.py:192
+#, python-format
+msgid "Unknown chain: %r"
+msgstr ""
+
+#: cinder/network/linux_net.py:215
+#, python-format
+msgid ""
+"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
+"%(top)r"
+msgstr ""
+
+#: cinder/network/linux_net.py:335
+msgid "IPTablesManager.apply completed with success"
+msgstr ""
+
+#: cinder/network/linux_net.py:694
+#, python-format
+msgid "Hupping dnsmasq threw %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:696
+#, python-format
+msgid "Pid %d is stale, relaunching dnsmasq"
+msgstr ""
+
+#: cinder/network/linux_net.py:756
+#, python-format
+msgid "killing radvd threw %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:758
+#, python-format
+msgid "Pid %d is stale, relaunching radvd"
+msgstr ""
+
+#: cinder/network/linux_net.py:967
+#, python-format
+msgid "Starting VLAN interface %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:999
+#, python-format
+msgid "Starting Bridge interface for %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:1142
+#, python-format
+msgid "Starting bridge %s "
+msgstr ""
+
+#: cinder/network/linux_net.py:1149
+#, python-format
+msgid "Done starting bridge %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:1167
+#, python-format
+msgid "Failed unplugging gateway interface '%s'"
+msgstr ""
+
+#: cinder/network/linux_net.py:1170
+#, python-format
+msgid "Unplugged gateway interface '%s'"
+msgstr ""
+
+#: cinder/network/manager.py:291
+#, python-format
+msgid "Fixed ip %(fixed_ip_id)s not found"
+msgstr ""
+
+#: cinder/network/manager.py:300 cinder/network/manager.py:496
+#, python-format
+msgid "Interface %(interface)s not found"
+msgstr ""
+
+#: cinder/network/manager.py:315
+#, python-format
+msgid "floating IP allocation for instance |%s|"
+msgstr ""
+
+#: cinder/network/manager.py:353
+#, python-format
+msgid "floating IP deallocation for instance |%s|"
+msgstr ""
+
+#: cinder/network/manager.py:386
+#, python-format
+msgid "Address |%(address)s| is not allocated"
+msgstr ""
+
+#: cinder/network/manager.py:390
+#, python-format
+msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
+msgstr ""
+
+#: cinder/network/manager.py:402
+#, python-format
+msgid "Quota exceeded for %s, tried to allocate address"
+msgstr ""
+
+#: cinder/network/manager.py:614
+#, python-format
+msgid ""
+"Database inconsistency: DNS domain |%s| is registered in the Cinder db but "
+"not visible to either the floating or instance DNS driver. It will be "
+"ignored."
+msgstr ""
+
+#: cinder/network/manager.py:660
+#, python-format
+msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
+msgstr ""
+
+#: cinder/network/manager.py:670
+#, python-format
+msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: cinder/virt/connection.py:85 +msgid "Failed to 
open connection to the hypervisor" +msgstr "" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +msgid "Instance soft rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +msgid "Guest does not have a console available" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of 
%(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." 
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1533
+msgid "Partitions:"
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1539
+#, python-format
+msgid " %(num)s: %(ptype)s %(size)d sectors"
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1565
+#, python-format
+msgid ""
+"Writing partition table %(primary_first)d %(primary_last)d to "
+"%(dev_path)s..."
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1578
+#, python-format
+msgid "Writing partition table %s done."
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1632
+#, python-format
+msgid ""
+"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
+"virtual_size=%(virtual_size)d block_size=%(block_size)d"
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1664
+#, python-format
+msgid ""
+"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% "
+"reduction in size"
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1714
+msgid ""
+"XenServer tools installed in this image are capable of network injection."
+" Networking files will not be manipulated"
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1722
+msgid ""
+"XenServer tools are present in this image but are not capable of network "
+"injection"
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1726
+msgid "XenServer tools are not installed in this image"
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1742
+msgid "Manipulating interface files directly"
+msgstr ""
+
+#: cinder/virt/xenapi/vm_utils.py:1751
+#, python-format
+msgid "Failed to mount filesystem (expected for non-linux instances): %s"
+msgstr ""
+
+#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722
+#, python-format
+msgid "Updating progress to %(progress)d"
+msgstr ""
+
+#: cinder/virt/xenapi/vmops.py:231
+#, python-format
+msgid "Attempted to power on non-existent instance; bad instance id %s"
+msgstr ""
+
+#: cinder/virt/xenapi/vmops.py:233
+#, fuzzy, python-format
+msgid "Starting instance"
+msgstr "인스턴스 %s를 재부팅합니다"
+
+#: cinder/virt/xenapi/vmops.py:303
+msgid "Removing kernel/ramdisk files from dom0"
+msgstr ""
+
+#: cinder/virt/xenapi/vmops.py:358
+msgid "Failed to spawn, rolling back"
+msgstr ""
+
+#: cinder/virt/xenapi/vmops.py:443
+msgid "Detected ISO image type, creating blank VM for install"
+msgstr ""
+
+#: cinder/virt/xenapi/vmops.py:462
+msgid "Auto configuring disk, attempting to resize partition..."
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "%s 인스턴스에 볼륨장착 할 수 없습니다" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r"
+msgstr ""
+
+#: cinder/virt/xenapi/vmops.py:1661
+#, python-format
+msgid "OpenSSL error: %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:52
+msgid "creating sr within volume_utils"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83
+#, python-format
+msgid "type is = %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86
+#, python-format
+msgid "name = %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:71
+#, python-format
+msgid "Created %(label)s as %(sr_ref)s."
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174
+msgid "Unable to create Storage Repository"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:80
+msgid "introducing sr within volume_utils"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170
+#: cinder/virt/xenapi/volumeops.py:156
+#, python-format
+msgid "Introduced %(label)s as %(sr_ref)s."
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:106
+msgid "Creating pbd for SR"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:108
+msgid "Plugging SR"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160
+msgid "Unable to introduce Storage Repository"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50
+msgid "Unable to get SR using uuid"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:129
+#, python-format
+msgid "Forgetting SR %s..."
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:137
+msgid "Unable to forget Storage Repository"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:157
+#, python-format
+msgid "Introducing %s..."
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:186
+#, python-format
+msgid "Unable to find SR from VBD %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:204
+#, python-format
+msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:210
+#, python-format
+msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:234
+#, python-format
+msgid "Unable to introduce VDI on SR %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:242
+#, python-format
+msgid "Unable to get record of VDI %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:264
+#, python-format
+msgid "Unable to introduce VDI for SR %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:274
+#, python-format
+msgid "Error finding vdis in SR %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:281
+#, python-format
+msgid "Unable to find vbd for vdi %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:315
+#, python-format
+msgid "Unable to obtain target information %(data)s, %(mountpoint)s"
+msgstr ""
+
+#: cinder/virt/xenapi/volume_utils.py:341
+#, python-format
+msgid "Mountpoint cannot be translated: %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:64
+msgid "Could not find VDI ref"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:69
+#, python-format
+msgid "Creating SR %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:73
+msgid "Could not create SR"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:76
+msgid "Could not retrieve SR record"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:81
+#, python-format
+msgid "Introducing SR %s"
+msgstr ""
+
+#: cinder/virt/xenapi/volumeops.py:85
+msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "%(instance_name)s 인스턴스의 %(sr_ref)s SR에 대한 VDI 생성이 실패했습니다" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "%(instance_name)s 인스턴스의 %(sr_ref)s SR을 사용 할 수 없습니다" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "%s 인스턴스에 볼륨장착 할 수 없습니다" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "%(instance_name)s 인스턴스에 %(mountpoint)s 마운트지점이 장착되었습니다" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "볼륨 탈착: %(instance_name)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "%s 볼륨을 찾을수 없습니다" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "%s 볼륨 탈착에 실패했습니다" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "%(instance_name)s 인스턴스에 %(mountpoint)s 마운트지점이 탈착되었습니다" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid 
"Volume Snapshot status must be available or error" +msgstr "볼륨의 상태를 알 수 없습니다" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "인스턴스 %s: 스냅샷 저장중" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" 
+msgstr ""
+
+#: cinder/volume/manager.py:314
+#, python-format
+msgid "New capabilities found: %s"
+msgstr ""
+
+#: cinder/volume/manager.py:325
+msgid "Clear capabilities"
+msgstr ""
+
+#: cinder/volume/manager.py:329
+#, python-format
+msgid "Notification {%s} received"
+msgstr ""
+
+#: cinder/volume/netapp.py:79
+#, python-format
+msgid "API %(name)s failed: %(reason)s"
+msgstr ""
+
+#: cinder/volume/netapp.py:109
+#, python-format
+msgid "%s is not set"
+msgstr ""
+
+#: cinder/volume/netapp.py:128
+msgid "Connected to DFM server"
+msgstr ""
+
+#: cinder/volume/netapp.py:159
+#, python-format
+msgid "Job failed: %s"
+msgstr ""
+
+#: cinder/volume/netapp.py:240
+msgid "Failed to provision dataset member"
+msgstr ""
+
+#: cinder/volume/netapp.py:252
+msgid "No LUN was created by the provision job"
+msgstr ""
+
+#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433
+#, fuzzy, python-format
+msgid "Failed to find LUN ID for volume %s"
+msgstr "%s 볼륨을 찾을수 없습니다"
+
+#: cinder/volume/netapp.py:280
+msgid "Failed to remove and delete dataset member"
+msgstr ""
+
+#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657
+#, fuzzy, python-format
+msgid "No LUN ID for volume %s"
+msgstr "%s 볼륨을 찾을수 없습니다"
+
+#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661
+#, python-format
+msgid "Failed to get LUN details for LUN ID %s"
+msgstr ""
+
+#: cinder/volume/netapp.py:614
+#, python-format
+msgid "Failed to get host details for host ID %s"
+msgstr ""
+
+#: cinder/volume/netapp.py:620
+#, python-format
+msgid "Failed to get target portal for filer: %s"
+msgstr ""
+
+#: cinder/volume/netapp.py:625
+#, python-format
+msgid "Failed to get target IQN for filer: %s"
+msgstr ""
+
+#: cinder/volume/san.py:113 cinder/volume/san.py:151
+msgid "Specify san_password or san_private_key"
+msgstr ""
+
+#: cinder/volume/san.py:156
+msgid "san_ip must be set"
+msgstr ""
+
+#: cinder/volume/san.py:320
+#, python-format
+msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s"
+msgstr ""
+
+#: cinder/volume/san.py:452
+#, python-format
+msgid "CLIQ command returned %s"
+msgstr ""
+
+#: cinder/volume/san.py:458
+#, python-format
+msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s"
+msgstr ""
+
+#: cinder/volume/san.py:466
+#, python-format
+msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s"
+msgstr ""
+
+#: cinder/volume/san.py:496
+#, python-format
+msgid ""
+"Unexpected number of virtual ips for cluster %(cluster_name)s. "
+"Result=%(_xml)s"
+msgstr ""
+
+#: cinder/volume/san.py:549
+#, python-format
+msgid "Volume info: %(volume_name)s => %(volume_attributes)s"
+msgstr ""
+
+#: cinder/volume/san.py:594
+msgid "local_path not supported"
+msgstr ""
+
+#: cinder/volume/san.py:626
+#, python-format
+msgid "Could not determine project for volume %s, can't export"
+msgstr ""
+
+#: cinder/volume/san.py:696
+#, python-format
+msgid "Payload for SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/san.py:713
+#, python-format
+msgid "Call to json.loads() raised an exception: %s"
+msgstr ""
+
+#: cinder/volume/san.py:718
+#, python-format
+msgid "Results of SolidFire API call: %s"
+msgstr ""
+
+#: cinder/volume/san.py:732
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: cinder/volume/san.py:746
+#, python-format
+msgid "solidfire account: %s does not exist, create it..."
+msgstr ""
+
+#: cinder/volume/san.py:804
+msgid "Enter SolidFire create_volume..."
+msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." 
+msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." 
+#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "인스턴스 %s가 이미 삭제되었습니다" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Reconnected to queue" +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "response %s" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "" + +#~ msgid "message %s" +#~ msgstr "" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "" + +#~ msgid "Declaring queue %s" +#~ msgstr "" + +#~ msgid "Declaring exchange %s" +#~ msgstr "" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "" + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgstr "" + +#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "인스턴스 %s: 생성에 실패했습니다" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgstr "" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/nova.pot b/cinder/locale/nova.pot new file mode 100644 index 00000000000..2f94b7e3d12 --- /dev/null +++ b/cinder/locale/nova.pot @@ -0,0 +1,7463 @@ +# Translations template for cinder. +# Copyright (C) 2012 ORGANIZATION +# This file is distributed under the same license as the cinder project. +# FIRST AUTHOR , 2012. 
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: cinder 2012.2\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2012-04-08 23:04+0000\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+
+#: cinder/context.py:59
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: cinder/context.py:90
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: cinder/crypto.py:48
+msgid "Filename of root CA"
+msgstr ""
+
+#: cinder/crypto.py:51
+msgid "Filename of private key"
+msgstr ""
+
+#: cinder/crypto.py:54
+msgid "Filename of root Certificate Revocation List"
+msgstr ""
+
+#: cinder/crypto.py:57
+msgid "Where we keep our keys"
+msgstr ""
+
+#: cinder/crypto.py:60
+msgid "Where we keep our root CA"
+msgstr ""
+
+#: cinder/crypto.py:63
+msgid "Should we use a CA for each project?"
+msgstr ""
+
+#: cinder/crypto.py:67
+#, python-format
+msgid "Subject for certificate for users, %s for project, user, timestamp"
+msgstr ""
+
+#: cinder/crypto.py:72
+#, python-format
+msgid "Subject for certificate for projects, %s for project, timestamp"
+msgstr ""
+
+#: cinder/crypto.py:292
+#, python-format
+msgid "Flags path: %s"
+msgstr ""
+
+#: cinder/exception.py:56
+msgid "Unexpected error while running command."
+msgstr ""
+
+#: cinder/exception.py:59
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+
+#: cinder/exception.py:94
+msgid "DB exception wrapped."
+msgstr ""
+
+#: cinder/exception.py:155
+msgid "An unknown exception occurred."
+msgstr ""
+
+#: cinder/exception.py:178
+msgid "Failed to decrypt text"
+msgstr ""
+
+#: cinder/exception.py:182
+msgid "Failed to paginate through images from image service"
+msgstr ""
+
+#: cinder/exception.py:186
+msgid "Virtual Interface creation failed"
+msgstr ""
+
+#: cinder/exception.py:190
+msgid "5 attempts to create virtual interface with unique mac address failed"
+msgstr ""
+
+#: cinder/exception.py:195
+msgid "Connection to glance failed"
+msgstr ""
+
+#: cinder/exception.py:199
+msgid "Connection to melange failed"
+msgstr ""
+
+#: cinder/exception.py:203
+msgid "Not authorized."
+msgstr ""
+
+#: cinder/exception.py:208
+msgid "User does not have admin privileges"
+msgstr ""
+
+#: cinder/exception.py:212
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr ""
+
+#: cinder/exception.py:216
+#, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr ""
+
+#: cinder/exception.py:220
+msgid "Unacceptable parameters."
+msgstr ""
+
+#: cinder/exception.py:225
+msgid "Invalid snapshot"
+msgstr ""
+
+#: cinder/exception.py:229
+#, python-format
+msgid "Volume %(volume_id)s is not attached to anything"
+msgstr ""
+
+#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113
+msgid "Keypair data is invalid"
+msgstr ""
+
+#: cinder/exception.py:237
+msgid "Failed to load data into json format"
+msgstr ""
+
+#: cinder/exception.py:241
+msgid "The request is invalid."
+msgstr ""
+
+#: cinder/exception.py:245
+#, python-format
+msgid "Invalid signature %(signature)s for user %(user)s."
+msgstr ""
+
+#: cinder/exception.py:249
+msgid "Invalid input received"
+msgstr ""
+
+#: cinder/exception.py:253
+#, python-format
+msgid "Invalid instance type %(instance_type)s."
+msgstr ""
+
+#: cinder/exception.py:257
+msgid "Invalid volume type"
+msgstr ""
+
+#: cinder/exception.py:261
+msgid "Invalid volume"
+msgstr ""
+
+#: cinder/exception.py:265
+#, python-format
+msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s"
+msgstr ""
+
+#: cinder/exception.py:269
+#, python-format
+msgid "Invalid IP protocol %(protocol)s."
+msgstr ""
+
+#: cinder/exception.py:273
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr ""
+
+#: cinder/exception.py:277
+#, python-format
+msgid "Invalid cidr %(cidr)s."
+msgstr ""
+
+#: cinder/exception.py:281
+msgid "Invalid reuse of an RPC connection."
+msgstr ""
+
+#: cinder/exception.py:285
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: cinder/exception.py:292
+#, python-format
+msgid "%(err)s"
+msgstr ""
+
+#: cinder/exception.py:296
+#, python-format
+msgid ""
+"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:"
+" %(reason)s."
+msgstr ""
+
+#: cinder/exception.py:301
+#, python-format
+msgid "Group not valid. Reason: %(reason)s"
+msgstr ""
+
+#: cinder/exception.py:305
+#, python-format
+msgid ""
+"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while"
+" the instance is in this state."
+msgstr ""
+
+#: cinder/exception.py:310
+#, python-format
+msgid "Instance %(instance_id)s is not running."
+msgstr ""
+
+#: cinder/exception.py:314
+#, python-format
+msgid "Instance %(instance_id)s is not suspended."
+msgstr ""
+
+#: cinder/exception.py:318
+#, python-format
+msgid "Instance %(instance_id)s is not in rescue mode"
+msgstr ""
+
+#: cinder/exception.py:322
+msgid "Failed to suspend instance"
+msgstr ""
+
+#: cinder/exception.py:326
+msgid "Failed to resume server"
+msgstr ""
+
+#: cinder/exception.py:330
+msgid "Failed to reboot instance"
+msgstr ""
+
+#: cinder/exception.py:334
+msgid "Failed to terminate instance"
+msgstr ""
+
+#: cinder/exception.py:338
+msgid "Service is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:342
+msgid "Volume service is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:346
+msgid "Compute service is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:350
+#, python-format
+msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
+msgstr ""
+
+#: cinder/exception.py:355
+msgid "Destination compute host is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:359
+msgid "Original compute host is unavailable at this time."
+msgstr ""
+
+#: cinder/exception.py:363
+msgid "The supplied hypervisor type is invalid."
+msgstr ""
+
+#: cinder/exception.py:367
+msgid "The instance requires a newer hypervisor version than has been provided."
+msgstr ""
+
+#: cinder/exception.py:372
+#, python-format
+msgid ""
+"The supplied disk path (%(path)s) already exists, it is expected not to "
+"exist."
+msgstr ""
+
+#: cinder/exception.py:377
+#, python-format
+msgid "The supplied device path (%(path)s) is invalid."
+msgstr ""
+
+#: cinder/exception.py:381
+#, python-format
+msgid "The supplied device (%(device)s) is busy."
+msgstr ""
+
+#: cinder/exception.py:385
+msgid "Unacceptable CPU info"
+msgstr ""
+
+#: cinder/exception.py:389
+#, python-format
+msgid "%(address)s is not a valid IP v4/6 address."
+msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." 
+msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." 
+msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." 
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be 
specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, python-format +msgid "Invalid mode: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance 
%s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +msgid "Going to try to soft delete instance" +msgstr "" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +msgid "Going to try to terminate instance" +msgstr "" + +#: cinder/compute/api.py:977 +msgid "Going to try to stop instance" +msgstr "" + +#: cinder/compute/api.py:996 +msgid "Going to try to start instance" +msgstr "" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." 
+msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." 
+msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: cinder/network/linux_net.py:1142 +#, python-format +msgid "Starting bridge %s " +msgstr "" + +#: cinder/network/linux_net.py:1149 +#, python-format +msgid "Done starting bridge %s" +msgstr "" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: cinder/virt/connection.py:85 +msgid "Failed to 
open connection to the hypervisor"
+msgstr ""
+
+#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396
+#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045
+#, python-format
+msgid "Compute_service record created for %s "
+msgstr ""
+
+#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399
+#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048
+#, python-format
+msgid "Compute_service record updated for %s "
+msgstr ""
+
+#: cinder/virt/firewall.py:130
+#, python-format
+msgid "Attempted to unfilter instance %s which is not filtered"
+msgstr ""
+
+#: cinder/virt/firewall.py:137
+#, python-format
+msgid "Filters added to instance %s"
+msgstr ""
+
+#: cinder/virt/firewall.py:139
+msgid "Provider Firewall Rules refreshed"
+msgstr ""
+
+#: cinder/virt/firewall.py:291
+#, python-format
+msgid "Adding security group rule: %r"
+msgstr ""
+
+#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87
+#, python-format
+msgid "Adding provider rule: %s"
+msgstr ""
+
+#: cinder/virt/images.py:86
+msgid "'qemu-img info' parsing failed."
+msgstr ""
+
+#: cinder/virt/images.py:92
+#, python-format
+msgid "fmt=%(fmt)s backed by: %(backing_file)s"
+msgstr ""
+
+#: cinder/virt/images.py:104
+#, python-format
+msgid "Converted to raw, but format is now %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:105
+msgid ""
+"Must specify vmwareapi_host_ip, vmwareapi_host_username and "
+"vmwareapi_host_password to use connection_type=vmwareapi"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:276
+#, python-format
+msgid "In vmwareapi:_create_session, got this exception: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:359
+#, python-format
+msgid "In vmwareapi:_call_method, got this exception: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:398
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: success"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:404
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:409
+#, python-format
+msgid "In vmwareapi:_poll_task, got this error %s"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:140
+msgid ""
+"Must specify xenapi_connection_url, xenapi_connection_username "
+"(optionally), and xenapi_connection_password to use "
+"connection_type=xenapi"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472
+msgid "Could not determine iscsi initiator name"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:460
+msgid "Host startup on XenServer is not supported."
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:489
+msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:527
+msgid "Host is member of a pool, but DB says otherwise"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612
+#, python-format
+msgid "Got exception: %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:93
+msgid "No domains exist."
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +msgid "Instance soft rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +msgid "Guest does not have a console available" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of 
%(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +msgid "Starting instance" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: 
cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/netapp.py:614 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/netapp.py:620 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/netapp.py:625 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." 
+msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." 
+msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + diff --git a/cinder/locale/pt_BR/LC_MESSAGES/nova.po b/cinder/locale/pt_BR/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..899b9e6b73e --- /dev/null +++ b/cinder/locale/pt_BR/LC_MESSAGES/nova.po @@ -0,0 +1,8208 @@ +# Brazilian Portuguese translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-02-06 21:07+0000\n" +"Last-Translator: Adriano Steffler \n" +"Language-Team: Brazilian Portuguese \n" +"Plural-Forms: nplurals=2; plural=(n > 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "Nome do arquivo da CA raiz" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Nome do arquivo da chave privada" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "Aonde armazenamos nossas chaves" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "Onde mantemos nosso CA raiz" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "Devemos usar um CA para cada projeto?" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "Assunto do certificado para usuários, %s para projeto, usuário, timestamp" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Assunto do certificado para projetos, %s para projeto, timestamp" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "Localização dos sinalizadores: %s" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Erro inesperado ao executar o comando." 
+ +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Código de saída: %(exit_code)s\n" +"Saída padrão: %(stdout)r\n" +"Erro: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "sem método para mensagem: %s" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." 
+msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +#, fuzzy +msgid "Failed to terminate instance" +msgstr "Começando a terminar instâncias" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." 
+msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." 
+msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." 
+msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." 
+msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "Usuário %(uid)s já é um membro do grupo %(group_dn)s" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." 
+msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "Encerrado serviço que não tem entrada na base de dados" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "O objeto da base de dados do serviço desapareceu, Recriando." + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "Recuperada conexão servidor de modelo." 
+ +#: cinder/service.py:340 +msgid "model server went away" +msgstr "servidor de modelo perdido" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de FLAGS:" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "Exceção interna: %s" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "Buscando %s" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Executando comando (subprocesso): %s" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "Resultado foi %s" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Executando o comando (SSH): %s" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "depuração em retorno de chamada: %s" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Endereço para Link Local não encontrado: %s" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "Não foi possível obter o IP de Link Local de %(interface)s :%(ex)s" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "Backend inválido: %s" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "backend %s" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "Muitas falhas de autenticação." + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "Falha de Autenticação: %s" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "ação: %s" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "NotFound lançado: %s" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, fuzzy, python-format +msgid "QuotaError raised: %s" +msgstr "Erro inexperado lançado: %s" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "Erro inexperado lançado: %s" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "Ocorreu um erro desconhecido. Por favor tente sua requisição cindermente." + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "Criar par de chaves %s" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "Remover par de chaves %s" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revogado entrada do grupo de segurança %s" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "Não existe regra para os parâmetros especificados" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "Autorizada entrada do grupo de segurança %s" + +#: cinder/api/ec2/cloud.py:725 +#, fuzzy, python-format +msgid "%s - This rule already exists in group" +msgstr "Esta regra já existe no grupo %s" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "Criar Grupo de Segurança %s" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "group %s já existe" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "Excluir grupo de segurança %s" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "Obter saída do console para instância %s" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "Criar volume de %s GB" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "Desanexar volume %s" + +#: cinder/api/ec2/cloud.py:959 +#, fuzzy, python-format +msgid "Detach Volume Failed." 
+msgstr "Desanexar volume %s" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "atributo não suportado: %s" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "Alocar endereço" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "Liberar endereço %s" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "Desatribuir endereço %s" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "Começando a terminar instâncias" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "Reiniciar instância %r" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "Removendo o registro da imagem %s" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "usuário ou grupo não especificado" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "apenas o grupo \"all\" é suportado" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "operation_type deve ser add ou remove" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "Atualizando publicidade da imagem %s" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado o erro: %s" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" 
+msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." +msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: 
cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, fuzzy, python-format +msgid "Invalid mode: '%s'" +msgstr "Backend inválido: %s" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +#, fuzzy, python-format +msgid "Security group is still in use" +msgstr "Revogado entrada do grupo de segurança %s" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Esta regra já existe no grupo %s" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "Tentativa de instanciar singleton" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" +"Tentatica de remover o último membto de um grupo. Ao invés disso " +"excluindo o grupo %s." 
+ +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "Procurando usuário: %r" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Falha de autorização para chave de acesso %s" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "Usando nome do projeto = nome do usuário (%s)" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Assinatura inválida para usuário %s" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "Deve especificar projeto" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "modificando projeto %s" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "Adicionando usuário %(uid)s ao projeto %(pid)s" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "Remover usuário %(uid)s do projeto %(pid)s" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "Excluindo projeto %s" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "Apagando usuário %s" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +#, fuzzy, python-format +msgid "Instance type for vpn instances" +msgstr "Obter saída do console para instância %s" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn 
config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "Executando VPN para %s" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance %s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +#, fuzzy +msgid "Going to try to soft delete instance" +msgstr "Começando a terminar instâncias" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +#, fuzzy +msgid "Going to try to terminate instance" +msgstr "Começando a terminar instâncias" + +#: cinder/compute/api.py:977 +msgid "Going to try to stop instance" +msgstr "" + +#: cinder/compute/api.py:996 +#, fuzzy +msgid "Going to try to start instance" +msgstr "Começando a terminar instâncias" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: executando: |%s|" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: not executando |%s|" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "A instância já foi criada" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "Reiniciando a instância %s" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "instância %s: fazendo um snapshot" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "instância %s: resgatando" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "instância %s: desfazendo o resgate" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "instância %s: pausando" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "instância %s: saindo do pause" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instância %s: recuperando os diagnósticos" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "instância %s: suspendendo" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "instância %s: resumindo" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "instância %s: bloqueando" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "instância %s: desbloqueando" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "instância %s: obtendo estado de bloqueio" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "instância %s: reset da rede" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format 
+msgid "Detaching volume from unknown instance %s" +msgstr "Desconectando volume da instância desconhecida %s" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." 
+msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "Adicionando console" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." +msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d está ultrapassado, reiniciando dnsmasq" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "Pid %d está ultrapassado, reiniciando radvd" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Iniciando a VLAN %s" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Iniciando a Bridge para %s" + +#: cinder/network/linux_net.py:1142 +#, fuzzy, python-format +msgid "Starting bridge %s " +msgstr "Iniciando a Bridge para %s" + +#: cinder/network/linux_net.py:1149 +#, fuzzy, python-format +msgid "Done starting bridge %s" +msgstr "Removendo o registro da imagem %s" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Retornando exceção %s ao chamador" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "contexto descompactado: %s" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "recebido %s" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "sem método para mensagem: %s" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "Sem método para mensagem: %s" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: cinder/virt/connection.py:85 +msgid "Failed to 
open connection to the hypervisor" +msgstr "Falha ao abrir a conexão com o hypervisor" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy, python-format +msgid "Instance soft rebooted successfully." +msgstr "volume %s: criado com sucesso" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +msgid "Guest does not have a console available" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "Impossível localizar uma porta aberta" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "Aumento não implementado" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake não tem uma implementação para %s" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "Chamando %(localname)s %(impl)s" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "Chamando o pai %s" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake não tem implementação para %s ou foi chamado com um número de" +" argumentos inválido" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "Encontrado múltiplas redes para a bridge %s" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "Não foi encontrada rede para bridge %s" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "Não é possível desconectar o volume %s" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "O VBD não foi encontrado na instância %s" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, fuzzy, python-format +msgid "VBD %s already detached" +msgstr "group %s já existe" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Não é possível desconectar o VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Não é possível destruir o VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, fuzzy, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "VBD %(vbd_ref)s criado para VM %(vm_ref)s, VDI %(vdi_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "VBD %(vbd_ref)s criado para VM %(vm_ref)s, VDI %(vdi_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" +"VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) no SR " +"%(sr_ref)s criada com sucesso." + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, fuzzy, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "Fazendo um snapshot da VM %(vm_ref)s com rótulo '%(label)s'..." + +#: cinder/virt/xenapi/vm_utils.py:392 +#, fuzzy, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "Snapshot %(template_vm_ref)s criado a partir da VM %(vm_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" +"Solicitando à xapi para realizar upload da imagem %(vdi_uuids)s com ID " +"%(image_id)s" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, fuzzy, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "Tamanho da imagem %(image)s:%(virtual_size)d" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "Copiando o VDI %s de /boot/guest no dom0" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "Kernel/Ramdisk %s destruidos" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Verificando o vdi %s para kernel PV" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, 
python-format +msgid "VDI %s is still available" +msgstr "O VDI %s continua disponível" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver vm state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of %(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "Re-escaneando SR %s" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "O VHD %(vdi_uuid)s tem pai %(parent_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "Conectando VBD %s ... " + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "O VDB %s foi conectado." 
+ +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "Destruindo VBD para o VDI %s ... " + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "O VBD para o VDI %s foi destruído." + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "Rodando pygrub cindermente %s" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "Kernel Xen encontrado: %s" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." +msgstr "Kernel Xen não encontrado. Iniciando como HVM." + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +#, fuzzy, python-format +msgid "Starting instance" +msgstr "Reiniciando a instância %s" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "Não é possível anexar o volume na instância %s" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, fuzzy, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "Criando a VIF para VM %(vm_ref)s, rede %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1495 +#, fuzzy, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "Criando a VIF para VM %(vm_ref)s, rede %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "Erro de OpenSSL: %s" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "Introduzindo %s..." 
+ +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "Ponto de montagem não pode ser traduzido: %s" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" +"Não é possível criar o VDI no SR %(sr_ref)s para a instância " +"%(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Não é possível usar o SR %(sr_ref)s para a instância %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Não é possível anexar o volume na instância %s" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "Ponto de montagem %(mountpoint)s conectada à instância %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Não é possível desconectar o volume %s" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "Ponto de montagem %(mountpoint)s desanexada da instância %(instance_name)s" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr 
"" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "O status do volume parece estar disponível" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Re-exportando %s volumes" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "volume %s: ignorando export" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "volume %s: criando" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: criando lv com tamanho %(vol_size)sG" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: criando o export" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "volume %s: criado com sucesso" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "O volume continua atachado" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "O volume não pertence à este node" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "volume %s: removendo export" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "volume %s: removendo" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volume %s: remoção realizada com sucesso" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot 
%(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "instância %s: fazendo um snapshot" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Reconectado à fila" + +#: cinder/volume/netapp.py:159 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "NotFound lançado: %s" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Não é possível localizar o volume %s" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/netapp.py:614 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/netapp.py:620 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/netapp.py:625 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. 
" +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" 
+msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "resposta %s" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." +msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. 
vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." +#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "tentando destruir instância já destruida: %s" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "Iniciando o Adaptador Consumidor para %s" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "topico é %s" + +#~ msgid "message %s" +#~ msgstr "mensagem %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." 
+#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "(%(nm)s) publicar (key: %(routing_key)s) %(message)s" + +#~ msgid "Publishing to route %s" +#~ msgstr "Publicando para rota %s" + +#~ msgid "Declaring queue %s" +#~ msgstr "Declarando fila %s" + +#~ msgid "Declaring exchange %s" +#~ msgstr "Declarando troca %s" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "Ligação %(queue)s para %(exchange)s com chave %(routing_key)s" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "Recebendo de %(queue)s: %(message)s" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "VM %s criada..." + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "VM %(instance_name)s criada como %(vm_ref)s." + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "Criando VBD para VDI %s ... " + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "O VBD para VDI %s foi criado." + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "Ignorando XenAPI.Failure em VBD.unplug: %s" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "instância %s: falha na geração" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "VIF %(vif_ref)s criada para VM %(vm_ref)s, rede %(network_ref)s." + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "Criando VBD para VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/ru/LC_MESSAGES/nova.po b/cinder/locale/ru/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..bc639e4f181 --- /dev/null +++ b/cinder/locale/ru/LC_MESSAGES/nova.po @@ -0,0 +1,8304 @@ +# Russian translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-03-25 09:34+0000\n" +"Last-Translator: Eugene Marshal \n" +"Language-Team: Russian \n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "Имя файла корневого центра сертификации" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Имя файла секретного ключа" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "Имя файла корневого списка отзыва сертификатов" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "Путь к ключам" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "Место расположения нашего корневого центра сертификации" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "Должны ли мы использовать центр сертификации для каждого проекта?" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" +"Тема для сертификатов пользователей, %s для проекта, пользователя, " +"временной метки" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Тема для сертификатов проектов, %s для проекта, временная метка" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "Расположение флагов: %s" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Неожиданная ошибка при выполнении команды." + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Команда: %(cmd)s\n" +"Код выхода: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "Обнаружено неизвестное исключение." + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "Ошибка дешифровки текста" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "Ошибка создания виртуального интерфейса" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" +"5 безуспешных попыток создания виртуального интерфейса с уникальным " +"mac-адресом" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "Сбой соединения с glance" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "Сбой соединения c melange" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "Не авторизировано." 
+ +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "Пользователь не имеет административных привилегий" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "Политика не допускает выполнения %(action)s." + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "Ядро не найдено для образа %(image_id)s." + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "Недопустимые параметры." + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "Недопустимый снимок" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "Том %(volume_id)s никуда не присоединён" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "Ошибка загрузки данных в формат json" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "Недопустимый запрос." + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "Недопустимая подпись %(signature)s для пользователя %(user)s." + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "Недопустимый тип копии %(instance_type)s." + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "Недопустимый тип тома" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "Недопустимый том" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "Недопустимый диапазон портов %(from_port)s:%(to_port)s. %(msg)s" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "Недопустимый протокол IP %(protocol)s." + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "Недопустимый тип содержимого %(content_type)s." + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "Недопустимый cidr %(cidr)s." + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, fuzzy, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "Допустимый узел не найден. %(reason)s" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" +"Копия %(instance_uuid)s в %(attr)s %(state)s. Невозможно %(method)s во " +"время нахождения копии в этом состоянии." + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "Копия %(instance_id)s не выполняется." + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "Копия %(instance_id)s не переведена в режим приостановления." 
+ +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "Копия %(instance_id)s не переведена в режим восстановления" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "Ошибка приостановления копии" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "Ошибка возобновления работы сервера" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "Ошибка перезагрузки копии" + +#: cinder/exception.py:334 +#, fuzzy +msgid "Failed to terminate instance" +msgstr "Ошибка перезагрузки копии" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "В данный момент служба недоступна." + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "Служба томов в данный момент недоступна." + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "Служба Compute недоступна в настоящее время." + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "Невозможно переместить копию (%(instance_id)s) на текущий узел (%(host)s)." + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "Назначенный узел compute недоступен в настоящее время." + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "Исходный узел compute недоступен в настоящее время." + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "Копии необходима новая версия гипервизора, вместо предоставленной." + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" +"Предоставленный адрес диска (%(path)s) уже существует, но ожидалось, что " +"отсутствует." + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "Недопустимое размещение предоставленного устройства (%(path)s)." + +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "Недопустимое размещение предоставленного устройства (%(path)s)." + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "Недопустимые сведения ЦПУ" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "%(address)s не является допустимым IP-адресом в4/6." + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." 
+msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "Форматирование диска %(disk_format)s недопустимо" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "Образ %(image_id)s недопустим: %(reason)s" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "Копия %(instance_id)s недопустима: %(reason)s" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "Ec2 id %(ec2_id)s недопустим." + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "Ресурс не может быть найден." + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "Необходимый флаг %(flag)s не назначен." + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "Том %(volume_id)s не найден." + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "Не найден том для копии %(instance_id)s." + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "Том %(volume_id)s не имеет метаданных с ключом %(metadata_key)s." + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "Тип тома %(volume_type_id)s не может быть найден." + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "Тип тома под названием %(volume_type_name)s не может быть найден." + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" +"Тип тома %(volume_type_id)s не имеет дополнительных особенностей с ключом" +" %(extra_specs_key)s." + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "Снимок %(snapshot_id)s не может быть найден." + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "удаление тома %(volume_name)s, который имеет снимок" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "Отсутствует диск в %(location)s" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "Невозможно найти обработчик для тома %(driver_type)s." + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "Недопустимый образ href %(image_href)s." + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "Образ %(image_id)s не найден." 
+ +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "Ядро не найдено для образа %(image_id)s." + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "Пользователь %(user_id)s не найден." + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "Проект %(project_id)s не найден." + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "Пользователь %(user_id)s не является участником проекта %(project_id)s." + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "Полномочия %(role_id)s не могут быть найдены." + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "Невозможно найти SR для чтения/записи VDI." + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "%(req)s необходимо для создания сети." + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "Сеть %(network_id)s не найдена." + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "Сеть не может быть найдена для моста %(bridge)s" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "Сеть не может быть найдена для uuid %(uuid)s" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "Сеть не найдена с cidr %(cidr)s." + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "Сеть не найдена для копии %(instance_id)s." + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "Сети не определены." + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" +"Или uuid %(network_uuid)s сети не предоставлено или не присвоено проекту " +"%(project_id)s." + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "Узел не назначен сети (%(network_id)s)." + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." 
+msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "Недопустимый фиксированный IP-адрес %(address)s." + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "Запись DNS %(name)s уже существует в домене %(domain)s." + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "Интерфейс %(interface)s не найден." + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "Сертификат %(certificate_id)s не найден." + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "Служба %(service_id)s не найдена." + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "Узел %(host)s не найден." + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "Узел сompute %(host)s не найден." + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "Ключ доступа %(access_key)s не найден." + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "Квота не найдена" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "Квота проекта %(project_id)s не найдена." + +#: cinder/exception.py:696 +#, fuzzy, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "Класс %(class_name)s не найден: %(exception)s" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "Группа безопасности %(security_group_id)s не найдена." + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" +"Группа безопасности %(security_group_id)s не найдена для проекта " +"%(project_id)s." + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "Группа безопасности с правилом %(rule_id)s не найдена." 
+ +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" +"Группа безопасности %(security_group_id)s уже ассоциирована с копией " +"%(instance_id)s" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" +"Группа безопасности %(security_group_id)s не ассоциирована с копией " +"%(instance_id)s" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "Перемещение %(migration_id)s не найдено." + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "Перемещение не найдено для копии %(instance_id)s в состоянии %(status)s." + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "Пул консоли %(pool_id)s не найден." + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "Консоль %(console_id)s не найдена." + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "Консоль для копии %(instance_id)s не найдена." + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "Консоль для копии %(instance_id)s в пуле %(pool_id)s не найдена." + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "Недопустимый тип консоли %(console_type)s " + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "Тип копии %(instance_type_id)s не найден." + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "Тип копии с названием %(instance_type_name)s не найден." + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, fuzzy, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "Зона %(zone_id)s не найдена." + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "Копия %(instance_id)s не имеет метаданных с ключом %(metadata_key)s." + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "Объект LDAP не найден" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." 
+msgstr "Пользователь LDAP %(user_id)s не найден." + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "Группа LDAP %(group_id)s не найдена." + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "Пользователь LDAP %(user_id)s не является участником группы %(group_id)s." + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "Файл %(file_path)s не может быть найден." + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" +"Не найден виртуальный переключатель ассоциированный с сетевым адаптером " +"%(adapter)s." + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "Сетевой адаптер %(adapter)s не может быть найден." + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "Класс %(class_name)s не найден: %(exception)s" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "Действие не разрешено." + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "Невозможно использовать глобальные полномочия %(role_id)s" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "Пользователь %(user)s уже существует." + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "Пользователь LDAP %(user)s уже существует." + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "Группа LDAP %(group)s уже существует." + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "Пользователь %(uid)s уже является участником группы %(group_dn)s" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "Проект %(project)s уже существует." + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "Копия %(name)s уже существует." + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "Тип копии %(name)s уже существует." + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "Тип тома %(name)s уже существует." 
+ +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "%(path)s располагается на общедоступном накопителе: %(reason)s" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "Ошибка перемещения" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "Неправильное тело сообщения: %(reason)s" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "Невозможно найти конфигурацию по адресу %(path)s" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "При изменении размера, копии должны изменить размер!" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "Образ больше, чем допустимо для этого типа копии" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "1-а или несколько зон не могут завершить запрос" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "Недостаточно памяти на узле сети compute для запуска %(uuid)s." + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "Допустимый узел не найден. %(reason)s" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "Узел %(host)s не работает или не существует." + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "Превышена квота" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." 
+msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" +"Невозможно создать volume_type с именем %(name)s и спецификациями " +"%(extra_specs)s" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "Копия %(instance_id)s не найдена." + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, fuzzy, python-format +msgid "Could not fetch image %(image)s" +msgstr "Получение образа %(image)s" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" +"Пропуск %(full_task_name)s, %(ticks_to_skip)s раз осталось, для " +"произведения следующего запуска" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "Запуск повторяющегося задания %(full_task_name)s" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "Ошибка во время %(full_task_name)s: %(e)s" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "Запуск узла сети (версия %(vcs_string)s) %(topic)s" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "Служба завершила работу из-за отсутствия записи базы данных" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "Объект сервиса в базе данных отсутствует, Повторное создание." + +#: cinder/service.py:334 +msgid "Recovered model server connection!" 
+msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "Полный набор ФЛАГОВ:" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "Вложенное исключение: %s" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "Получение %s" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "Приняты неизвестные аргументы ключевого слова для utils.execute: %r" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Выполнение команды (субпроцесс): %s" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "Результат %s" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "%r ошибка. Выполняется повтор." + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "Выполнение команды (SSH): %s" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "Среда не поддерживается с использованием SSH" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "process_input не поддерживается с использованием SSH" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "отладка в обратном вызове: %s" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "Недопустимый внутренний интерфейс: %s" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "внутренний интерфейс %s" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "Ожидался объект типа: %s" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "Недопустимая server_string: %s" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "timefunc: '%(name)s' заняла %(total_time).2f с." 
+ +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "Исходное исключение было сброшено" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "Класс %(fullname)s устарел: %(msg)s" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "Класс %(fullname)s устарел" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "Функция %(name)s в %(location)s устарела: %(msg)s" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "Функция %(name)s в %(location)s устарела" + +#: cinder/utils.py:1681 +#, fuzzy, python-format +msgid "Could not remove tmpdir: %s" +msgstr "Ошибка удаления контейнера: %s" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "Выполняется %(name)s на %(host)s:%(port)s" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "Выполняется останов сервера WSGI." + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "Выполняется останов сервера TCP." + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "Запуск TCP сервера %(arg0)s на %(host)s:%(port)s" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "Сервер WSGI был остановлен." + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "недоступно" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "Слишком много неудачных попыток аутентификации." + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "Ошибка аутентификации: %s" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "Запрос аутентификации для %(uname)s:%(pname)s)" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "действие: %s" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" +"Неавторизированный запрос для контроллера=%(controller)s и " +"действия=%(action)s" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" +"Произошла неизвестная ошибка. Пожалуйста, попытайтесь повторить ваш " +"запрос." + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" +"Неподдерживаемый запрос API: контроллер = %(controller)s, действие = " +"%(action)s" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "Создать снимок тома %s" + +#: cinder/api/ec2/cloud.py:372 +#, fuzzy, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" +"Значение (%s) для параметра GroupName недопустимо. Содержание ограничено " +"буквенно-цифровыми символами, пробелами, тире и подчёркиваниями." + +#: cinder/api/ec2/cloud.py:378 +#, fuzzy, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" +"Значение (%s) для параметра GroupName недопустимо. Длина превышает " +"максимально допустимое значение 255." + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "Создание пары ключей %s" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "Импортировать ключ %s" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "Удаление пары ключей %s" + +#: cinder/api/ec2/cloud.py:551 +#, fuzzy, python-format +msgid "Invalid CIDR" +msgstr "Недопустимый cidr %(cidr)s." + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, fuzzy, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "Недостаточно параметров для сбора правильного правила." + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "Отсутствует правило для заданных параметров." + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, fuzzy, python-format +msgid "%s - This rule already exists in group" +msgstr "Это правило уже существует в группе %s" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" +"Значение (%s) для параметра GroupName недопустимо. Содержание ограничено " +"буквенно-цифровыми символами, пробелами, тире и подчёркиваниями." + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" +"Значение (%s) для параметра GroupName недопустимо. Длина превышает " +"максимально допустимое значение 255." 
+
+#: cinder/api/ec2/cloud.py:780
+#: cinder/api/openstack/compute/contrib/security_groups.py:292
+#, python-format
+msgid "Create Security Group %s"
+msgstr "Создать группу безопасности %s"
+
+#: cinder/api/ec2/cloud.py:783
+#, python-format
+msgid "group %s already exists"
+msgstr "группа %s уже существует"
+
+#: cinder/api/ec2/cloud.py:815
+#: cinder/api/openstack/compute/contrib/security_groups.py:245
+#, python-format
+msgid "Delete security group %s"
+msgstr "Удалить группу безопасности %s"
+
+#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630
+#, python-format
+msgid "Get console output for instance %s"
+msgstr "Получить консольный вывод для копии %s"
+
+#: cinder/api/ec2/cloud.py:894
+#, python-format
+msgid "Create volume from snapshot %s"
+msgstr "Создать том из снимка %s"
+
+#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186
+#: cinder/api/openstack/volume/volumes.py:222
+#, python-format
+msgid "Create volume of %s GB"
+msgstr "Создание тома размером %s ГБ"
+
+#: cinder/api/ec2/cloud.py:921
+msgid "Delete Failed"
+msgstr "Ошибка удаления"
+
+#: cinder/api/ec2/cloud.py:931
+#, python-format
+msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
+msgstr "Подключить том %(volume_id)s к копии %(instance_id)s на %(device)s"
+
+#: cinder/api/ec2/cloud.py:939
+msgid "Attach Failed."
+msgstr "Ошибка подключения."
+
+#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366
+#, python-format
+msgid "Detach volume %s"
+msgstr "Отсоединить том %s"
+
+#: cinder/api/ec2/cloud.py:959
+msgid "Detach Volume Failed."
+msgstr "Ошибка отсоединения тома."
+
+#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041
+#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533
+#, python-format
+msgid "attribute not supported: %s"
+msgstr "атрибут не поддерживается: %s"
+
+#: cinder/api/ec2/cloud.py:1107
+#, python-format
+msgid "vol = %s\n"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1255
+msgid "Allocate address"
+msgstr "Выделить адрес"
+
+#: cinder/api/ec2/cloud.py:1267
+#, python-format
+msgid "Release address %s"
+msgstr "Освободить адрес %s"
+
+#: cinder/api/ec2/cloud.py:1272
+#, python-format
+msgid "Associate address %(public_ip)s to instance %(instance_id)s"
+msgstr "Присвоить адрес %(public_ip)s копии %(instance_id)s"
+
+#: cinder/api/ec2/cloud.py:1282
+#, python-format
+msgid "Disassociate address %s"
+msgstr "Отвязать адрес %s"
+
+#: cinder/api/ec2/cloud.py:1308
+msgid "Image must be available"
+msgstr "Образ должен быть доступен"
+
+#: cinder/api/ec2/cloud.py:1329
+msgid "Going to start terminating instances"
+msgstr "Выполнение завершения работы копий"
+
+#: cinder/api/ec2/cloud.py:1343
+#, python-format
+msgid "Reboot instance %r"
+msgstr "Перезагрузить копию %r"
+
+#: cinder/api/ec2/cloud.py:1354
+msgid "Going to stop instances"
+msgstr "Выполнение остановки копий"
+
+#: cinder/api/ec2/cloud.py:1365
+msgid "Going to start instances"
+msgstr "Выполнение запуска копий"
+
+#: cinder/api/ec2/cloud.py:1455
+#, python-format
+msgid "De-registering image %s"
+msgstr "Отмена регистрации образа %s"
+
+#: cinder/api/ec2/cloud.py:1471
+msgid "imageLocation is required"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1490
+#, python-format
+msgid "Registered image %(image_location)s with id %(image_id)s"
+msgstr "Зарегистрирован образ %(image_location)s с идентификатором %(image_id)s"
+
+#: cinder/api/ec2/cloud.py:1536
+msgid "user or group not specified"
+msgstr "не указан пользователь или группа"
+
+#: cinder/api/ec2/cloud.py:1538
+msgid "only group \"all\" is supported"
+msgstr "поддерживается только группа \"all\""
+
+#: cinder/api/ec2/cloud.py:1540
+msgid "operation_type must be add or remove"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1542
+#, python-format
+msgid "Updating image %s publicity"
+msgstr "Обновление публичности образа %s"
+
+#: cinder/api/ec2/cloud.py:1555
+#, python-format
+msgid "Not allowed to modify attributes for image %s"
+msgstr ""
+
+#: cinder/api/ec2/cloud.py:1603
+#, python-format
+msgid "Couldn't stop instance with in %d sec"
+msgstr "Невозможно остановить копию в течение %d с."
+
+#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr "Ошибка получения метаданных для ip: %s"
+
+#: cinder/api/openstack/__init__.py:43
+#, python-format
+msgid "Caught error: %s"
+msgstr "Обнаружена ошибка: %s"
+
+#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr "%(url)s вернул код HTTP %(status)d"
+
+#: cinder/api/openstack/__init__.py:94
+msgid "Must specify an ExtensionManager class"
+msgstr ""
+
+#: cinder/api/openstack/__init__.py:105
+#, python-format
+msgid "Extended resource: %s"
+msgstr "Расширенный ресурс: %s"
+
+#: cinder/api/openstack/__init__.py:130
+#, python-format
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
+msgstr ""
+"Расширение %(ext_name)s: Невозможно расширить ресурс %(collection)s: Нет "
+"такого ресурса"
+
+#: cinder/api/openstack/__init__.py:135
+#, python-format
+msgid "Extension %(ext_name)s extending resource: %(collection)s"
+msgstr "Расширение %(ext_name)s расширяет ресурс: %(collection)s"
+
+#: cinder/api/openstack/auth.py:90
+#, python-format
+msgid "%(user_id)s could not be found with token '%(token)s'"
+msgstr "%(user_id)s не может быть найден с токеном '%(token)s'"
+
+#: cinder/api/openstack/auth.py:134
+#, python-format
+msgid "%(user_id)s must be an admin or a member of %(project_id)s"
+msgstr "%(user_id)s должен быть администратором или участником %(project_id)s"
+
+#: cinder/api/openstack/auth.py:152
+msgid "Authentication requests must be made against a version root (e.g. /v2)."
+msgstr ""
+
+#: cinder/api/openstack/auth.py:167
+#, python-format
+msgid "Could not find %s in request."
+msgstr "В запросе невозможно найти %s."
+
+#: cinder/api/openstack/auth.py:191
+#, python-format
+msgid "Successfully authenticated '%s'"
+msgstr "Аутентификация '%s' выполнена"
+
+#: cinder/api/openstack/auth.py:241
+msgid "User not found with provided API key."
+msgstr "Не найден пользователь с предоставленным ключом API."
+
+#: cinder/api/openstack/auth.py:258
+#, python-format
+msgid "Provided API key is valid, but not for user '%(username)s'"
+msgstr "Предоставленный ключ API допустим, но не для пользователя '%(username)s'"
+
+#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167
+msgid "limit param must be an integer"
+msgstr ""
+
+#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171
+msgid "limit param must be positive"
+msgstr ""
+
+#: cinder/api/openstack/common.py:161
+msgid "offset param must be an integer"
+msgstr ""
+
+#: cinder/api/openstack/common.py:175
+msgid "offset param must be positive"
+msgstr ""
+
+#: cinder/api/openstack/common.py:203
+#, python-format
+msgid "marker [%s] not found"
+msgstr "маркер [%s] не найден"
+
+#: cinder/api/openstack/common.py:243
+#, python-format
+msgid "href %s does not contain version"
+msgstr "href %s не содержит версию"
+
+#: cinder/api/openstack/common.py:278
+msgid "Image metadata limit exceeded"
+msgstr ""
+
+#: cinder/api/openstack/common.py:295
+#, python-format
+msgid "Converting nw_info: %s"
+msgstr ""
+
+#: cinder/api/openstack/common.py:305
+#, python-format
+msgid "Converted networks: %s"
+msgstr "Преобразованные сети: %s"
+
+#: cinder/api/openstack/common.py:338
+#, python-format
+msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
+msgstr "Невозможно '%(action)s', когда копия в %(attr)s %(state)s"
+
+#: cinder/api/openstack/common.py:341
+#, python-format
+msgid "Instance is in an invalid state for '%(action)s'"
+msgstr "Копия в недопустимом состоянии для '%(action)s'"
+
+#: cinder/api/openstack/common.py:421
+msgid "Rejecting snapshot request, snapshots currently disabled"
+msgstr "Отклонение запроса снимка, снимки в данный момент отключены"
+
+#: cinder/api/openstack/common.py:423
+msgid "Instance snapshots are not permitted at this time."
+msgstr "Снимки копии в настоящий момент недопустимы."
+
+#: cinder/api/openstack/extensions.py:188
+#, python-format
+msgid "Loaded extension: %s"
+msgstr "Загруженное расширение: %s"
+
+#: cinder/api/openstack/extensions.py:225
+#, python-format
+msgid "Ext name: %s"
+msgstr ""
+
+#: cinder/api/openstack/extensions.py:226
+#, python-format
+msgid "Ext alias: %s"
+msgstr ""
+
+#: cinder/api/openstack/extensions.py:227
+#, python-format
+msgid "Ext description: %s"
+msgstr ""
+
+#: cinder/api/openstack/extensions.py:229
+#, python-format
+msgid "Ext namespace: %s"
+msgstr ""
+
+#: cinder/api/openstack/extensions.py:230
+#, python-format
+msgid "Ext updated: %s"
+msgstr ""
+
+#: cinder/api/openstack/extensions.py:232
+#, python-format
+msgid "Exception loading extension: %s"
+msgstr ""
+
+#: cinder/api/openstack/extensions.py:246
+#, python-format
+msgid "Loading extension %s"
+msgstr "Загрузка расширения %s"
+
+#: cinder/api/openstack/extensions.py:252
+#, python-format
+msgid "Calling extension factory %s"
+msgstr ""
+
+#: cinder/api/openstack/extensions.py:264
+#, python-format
+msgid "Failed to load extension %(ext_factory)s: %(exc)s"
+msgstr "Ошибка загрузки расширения %(ext_factory)s: %(exc)s"
+
+#: cinder/api/openstack/extensions.py:344
+#, python-format
+msgid "Failed to load extension %(classpath)s: %(exc)s"
+msgstr ""
+
+#: cinder/api/openstack/extensions.py:368
+#, python-format
+msgid "Failed to load extension %(ext_name)s: %(exc)s"
+msgstr "Ошибка загрузки расширения %(ext_name)s: %(exc)s"
+
+#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538
+msgid "cannot understand JSON"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:159
+#: cinder/api/openstack/compute/contrib/hosts.py:86
+msgid "cannot understand XML"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:543
+msgid "too many body keys"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:582
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr "Исключение при обработке ресурса: %s"
+
+#: cinder/api/openstack/wsgi.py:586
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:589
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:697
+msgid "Unrecognized Content-Type provided in request"
+msgstr "В запросе предоставлен нераспознанный тип содержимого"
+
+#: cinder/api/openstack/wsgi.py:701
+msgid "No Content-Type provided in request"
+msgstr "Тип содержимого не предоставлен в запросе"
+
+#: cinder/api/openstack/wsgi.py:705
+msgid "Empty body provided in request"
+msgstr "В запросе предоставлено пустое тело"
+
+#: cinder/api/openstack/wsgi.py:816
+#, python-format
+msgid "There is no such action: %s"
+msgstr ""
+
+#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832
+#: cinder/api/openstack/compute/server_metadata.py:58
+#: cinder/api/openstack/compute/server_metadata.py:76
+#: cinder/api/openstack/compute/server_metadata.py:101
+#: cinder/api/openstack/compute/server_metadata.py:126
+#: cinder/api/openstack/compute/contrib/admin_actions.py:211
+#: cinder/api/openstack/compute/contrib/console_output.py:52
+msgid "Malformed request body"
+msgstr "Неправильное тело запроса"
+
+#: cinder/api/openstack/wsgi.py:829
+msgid "Unsupported Content-Type"
+msgstr "Неподдерживаемый тип содержимого"
+
+#: cinder/api/openstack/wsgi.py:841
+msgid "Malformed request url"
+msgstr "Неправильный url запроса"
+
+#: cinder/api/openstack/wsgi.py:889
+#, python-format
+msgid "%(url)s returned a fault: %(e)s"
+msgstr "%(url)s возвратил ошибку: %(e)s"
+
+#: cinder/api/openstack/xmlutil.py:265
+msgid "element is not a child"
"element is not a child" +msgstr "элемент не является потомком" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" +msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "Инициализация диспетчера расширений." + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "образ не найден." + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "Неправильный формат тела запроса" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "Тело запроса и URI не совпадают" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "Тело запроса содержит избыточное количество объектов" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "Неправильный ключ метаданных" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "Копия не существует" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "Копия не является участником заданной сети" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" +"Только %(value)s %(verb)s запрос(ов) могут быть сделаны для %(uri)s, " +"каждые %(unit_string)s." + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." 
+msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "Сервер не существует" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "Элемент метаданных не найден" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "Недопустимое состояние сервера: %(status)s" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "Имя сервера является пустой строкой" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "Недопустимый сетевой формат: сетевой uuid имеет неправильный формат (%s)" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "Недопустимый фиксированный IP-адрес (%s)" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "Дубликаты сетей (%s) не разрешены" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "Недопустимый сетевой формат: отсутствует %s" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "Недопустимый сетевой формат" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "Содержимое данных пользователя не может быть дешифровано" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "Имя сервера не задано" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "Предоставлен недопустимый flavorRef." + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "Невозможно найти запрошенный образ" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "Предоставлен недопустимый key_name." 
+
+#: cinder/api/openstack/compute/servers.py:829
+#: cinder/api/openstack/compute/servers.py:849
+msgid "Instance has not been resized."
+msgstr "Размер копии не был изменён."
+
+#: cinder/api/openstack/compute/servers.py:835
+#, python-format
+msgid "Error in confirm-resize %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:855
+#, python-format
+msgid "Error in revert-resize %s"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:868
+msgid "Argument 'type' for reboot is not HARD or SOFT"
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:872
+msgid "Missing argument 'type' for reboot"
+msgstr "Отсутствует аргумент 'type' для перезагрузки"
+
+#: cinder/api/openstack/compute/servers.py:885
+#, python-format
+msgid "Error in reboot %s"
+msgstr "Ошибка при перезагрузке %s"
+
+#: cinder/api/openstack/compute/servers.py:897
+msgid "Unable to locate requested flavor."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:900
+msgid "Resize requires a change in size."
+msgstr "Изменение размера требует изменения объёма."
+
+#: cinder/api/openstack/compute/servers.py:924
+msgid "Malformed server entity"
+msgstr "Неправильный объект сервера"
+
+#: cinder/api/openstack/compute/servers.py:931
+msgid "Missing imageRef attribute"
+msgstr "Отсутствует атрибут imageRef"
+
+#: cinder/api/openstack/compute/servers.py:940
+msgid "Invalid imageRef provided."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:949
+msgid "Missing flavorRef attribute"
+msgstr "Отсутствует атрибут flavorRef"
+
+#: cinder/api/openstack/compute/servers.py:962
+msgid "No adminPass was specified"
+msgstr "adminPass не был задан"
+
+#: cinder/api/openstack/compute/servers.py:966
+#: cinder/api/openstack/compute/servers.py:1144
+msgid "Invalid adminPass"
+msgstr "Недопустимый adminPass"
+
+#: cinder/api/openstack/compute/servers.py:980
+msgid "Unable to parse metadata key/value pairs."
+msgstr ""
+
+#: cinder/api/openstack/compute/servers.py:993
+msgid "Resize request has invalid 'flavorRef' attribute."
+msgstr "Запрос изменения размера имеет недопустимый атрибут 'flavorRef'."
+
+#: cinder/api/openstack/compute/servers.py:996
+msgid "Resize requests require 'flavorRef' attribute."
+msgstr "Запросы на изменение размера требуют атрибута 'flavorRef'."
+
+#: cinder/api/openstack/compute/servers.py:1014
+#: cinder/api/openstack/compute/contrib/aggregates.py:142
+#: cinder/api/openstack/compute/contrib/networks.py:65
+msgid "Invalid request body"
+msgstr "Недопустимое тело запроса"
+
+#: cinder/api/openstack/compute/servers.py:1019
+msgid "Could not parse imageRef from request."
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "Копия не найдена" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "Недопустимые метаданные" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "Удаление параметров '%(unk_opt_str)s' из запроса" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "Ошибка перемещения %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "Сервер не найден" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "Копия не найдена" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be specified." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, fuzzy, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" +"Невозможно создать volume_type с именем %(name)s и спецификациями " +"%(extra_specs)s" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, fuzzy, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "Ошибка обновления агента: %(resp)r" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "Сервер не найден." + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +#, fuzzy +msgid "Flavor not found." +msgstr "Сервер не найден." + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "Нет тела запроса" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "Адрес не задан" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "Недопустимое состояние: '%s'" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, fuzzy, python-format +msgid "Invalid mode: '%s'" +msgstr "Недопустимое состояние: '%s'" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "Недопустимый параметр обновления: '%s'" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, fuzzy, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "Перевод узла %(host)s в %(state)s." + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "Перевод узла %(host)s в %(state)s." + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "Узел не найден" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "Отсутствует аргумент 'networkId' для addFixedIp" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "Отсутствует аргумент 'address' для removeFixedIp" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "Невозможно найти адрес %r" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "Исключение сети с идентификатором %s" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "Сеть не найдена" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "Отображение сети с идентификатором %s" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "Удаление сети с идентификатором %s" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +#, fuzzy, python-format +msgid "Security group is still in use" +msgstr "Группа безопасности (%s) не найдена" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "Группа безопасности %s уже существует" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "Группа безопасности %s не может быть пустой." + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "Группа безопасности (%s) не найдена" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." +msgstr "Недостаточно параметров для сбора правильного правила." 
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:376
+#, python-format
+msgid "This rule already exists in group %s"
+msgstr "Это правило уже существует в группе %s"
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:414
+msgid "Parent or group id is not integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:507
+msgid "Rule id is not integer"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:510
+#, python-format
+msgid "Rule (%s) not found"
+msgstr "Правило (%s) не найдено"
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:574
+#: cinder/api/openstack/compute/contrib/security_groups.py:607
+msgid "Security group not specified"
+msgstr "Группа безопасности не задана"
+
+#: cinder/api/openstack/compute/contrib/security_groups.py:578
+#: cinder/api/openstack/compute/contrib/security_groups.py:611
+msgid "Security group name cannot be empty"
+msgstr "Имя группы безопасности не может быть пустым"
+
+#: cinder/api/openstack/compute/contrib/server_start_stop.py:45
+#, python-format
+msgid "start instance %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/server_start_stop.py:54
+#, python-format
+msgid "stop instance %r"
+msgstr ""
+
+#: cinder/api/openstack/compute/contrib/volumes.py:73
+#: cinder/api/openstack/volume/volumes.py:106
+#, python-format
+msgid "vol=%s"
+msgstr "vol=%s"
+
+#: cinder/api/openstack/compute/contrib/volumes.py:146
+#: cinder/api/openstack/volume/volumes.py:184
+#, python-format
+msgid "Delete volume with id: %s"
+msgstr "Удалить том с идентификатором: %s"
+
+#: cinder/api/openstack/compute/contrib/volumes.py:329
+#, python-format
+msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
+msgstr "Присоединить том %(volume_id)s к копии %(server_id)s на %(device)s"
+
+#: cinder/api/openstack/compute/contrib/volumes.py:481
+#: cinder/api/openstack/volume/snapshots.py:110
+#, python-format
+msgid "Delete snapshot with id: %s"
+msgstr "Удалить снимок с идентификатором: %s"
+
+#: cinder/api/openstack/compute/contrib/volumes.py:524
+#: cinder/api/openstack/volume/snapshots.py:150
+#, python-format
+msgid "Create snapshot from volume %s"
+msgstr "Создать снимок из тома %s"
+
+#: cinder/auth/fakeldap.py:33
+msgid "Attempted to instantiate singleton"
+msgstr ""
+
+#: cinder/auth/ldapdriver.py:650
+#, python-format
+msgid ""
+"Attempted to remove the last member of a group. Deleting the group at %s "
+"instead."
+msgstr ""
+"Попытка удаления последнего участника группы. Вместо этого будет "
+"выполнено удаление группы в %s."
+
+#: cinder/auth/manager.py:298
+#, python-format
+msgid "Looking up user: %r"
+msgstr "Поиск пользователя: %r"
+
+#: cinder/auth/manager.py:302
+#, python-format
+msgid "Failed authorization for access key %s"
+msgstr "Ошибка авторизации для ключа доступа %s"
+
+#: cinder/auth/manager.py:308
+#, python-format
+msgid "Using project name = user name (%s)"
+msgstr ""
+
+#: cinder/auth/manager.py:315
+#, python-format
+msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)"
+msgstr "ошибка авторизации: нет проекта под названием %(pjid)s (user=%(uname)s)"
+
+#: cinder/auth/manager.py:324
+#, python-format
+msgid ""
+"Failed authorization: user %(uname)s not admin and not member of project "
+"%(pjname)s"
+msgstr ""
+"Ошибка авторизации: пользователь %(uname)s не является администратором и "
+"не является участником проекта %(pjname)s"
+
+#: cinder/auth/manager.py:331 cinder/auth/manager.py:343
+#, python-format
+msgid "user.secret: %s"
+msgstr ""
+
+#: cinder/auth/manager.py:332 cinder/auth/manager.py:344
+#, python-format
+msgid "expected_signature: %s"
+msgstr ""
+
+#: cinder/auth/manager.py:333 cinder/auth/manager.py:345
+#, python-format
+msgid "signature: %s"
+msgstr "подпись: %s"
+
+#: cinder/auth/manager.py:335 cinder/auth/manager.py:357
+#, python-format
+msgid "Invalid signature for user %s"
+msgstr "Недопустимая подпись для пользователя %s"
+
+#: cinder/auth/manager.py:353
+#, python-format
+msgid "host_only_signature: %s"
+msgstr ""
+
+#: cinder/auth/manager.py:449
+msgid "Must specify project"
+msgstr "Необходимо указать проект"
+
+#: cinder/auth/manager.py:490
+#, python-format
+msgid "Adding role %(role)s to user %(uid)s in project %(pid)s"
+msgstr "Добавление полномочий %(role)s для пользователя %(uid)s в проекте %(pid)s"
+
+#: cinder/auth/manager.py:493
+#, python-format
+msgid "Adding sitewide role %(role)s to user %(uid)s"
+msgstr ""
+
+#: cinder/auth/manager.py:519
+#, python-format
+msgid "Removing role %(role)s from user %(uid)s on project %(pid)s"
+msgstr "Удаление полномочий %(role)s для пользователя %(uid)s в проекте %(pid)s"
+
+#: cinder/auth/manager.py:522
+#, python-format
+msgid "Removing sitewide role %(role)s from user %(uid)s"
+msgstr ""
+
+#: cinder/auth/manager.py:595
+#, python-format
+msgid "Created project %(name)s with manager %(manager_user)s"
+msgstr "Создан проект %(name)s с руководителем %(manager_user)s"
+
+#: cinder/auth/manager.py:613
+#, python-format
+msgid "modifying project %s"
+msgstr "изменение проекта %s"
+
+#: cinder/auth/manager.py:625
+#, python-format
+msgid "Adding user %(uid)s to project %(pid)s"
+msgstr "Добавление пользователя %(uid)s в проект %(pid)s"
+
+#: cinder/auth/manager.py:646
+#, python-format
+msgid "Remove user %(uid)s from project %(pid)s"
+msgstr "Исключить пользователя %(uid)s из проекта %(pid)s"
+
+#: cinder/auth/manager.py:676
+#, python-format
+msgid "Deleting project %s"
+msgstr "Удаление проекта %s"
+
+#: cinder/auth/manager.py:734
+#, python-format
+msgid "Created user %(rvname)s (admin: %(rvadmin)r)"
+msgstr "Создан пользователь %(rvname)s (admin: %(rvadmin)r)"
+
+#: cinder/auth/manager.py:743
+#, python-format
+msgid "Deleting user %s"
+msgstr "Удаление пользователя %s"
+
+#: cinder/auth/manager.py:753
+#, python-format
+msgid "Access Key change for user %s"
+msgstr ""
+
+#: cinder/auth/manager.py:755
+#, python-format
+msgid "Secret Key change for user %s"
+msgstr ""
+
+#: cinder/auth/manager.py:757
+#, python-format
+msgid "Admin status set to %(admin)r for user %(uid)s"
+msgstr "Режим администратора назначен %(admin)r для пользователя %(uid)s"
+
+#: cinder/auth/manager.py:802
+#, python-format
+msgid "No vpn data for project %s"
+msgstr "Нет данных vpn для проекта %s"
+
+#: cinder/cloudpipe/pipelib.py:46
+msgid "Instance type for vpn instances"
+msgstr "Тип копии для копий vpn"
+
+#: cinder/cloudpipe/pipelib.py:49
+msgid "Template for cloudpipe instance boot script"
+msgstr ""
+
+#: cinder/cloudpipe/pipelib.py:52
+msgid "Network to push into openvpn config"
+msgstr ""
+
+#: cinder/cloudpipe/pipelib.py:55
+msgid "Netmask to push into openvpn config"
+msgstr ""
+
+#: cinder/cloudpipe/pipelib.py:107
+#, python-format
+msgid "Launching VPN for %s"
+msgstr "Запуск VPN для %s"
+
+#: cinder/compute/api.py:141
+msgid "No compute host specified"
+msgstr ""
+
+#: cinder/compute/api.py:144
+#, python-format
+msgid "Unable to find host for Instance %s"
+msgstr "Невозможно найти узел для копии %s"
+
+#: cinder/compute/api.py:192
+#, python-format
+msgid ""
+"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
+"properties"
+msgstr ""
+"Превышена квота для %(pid)s, попытка назначить %(num_metadata)s свойств "
+"метаданных"
+
+#: cinder/compute/api.py:203
+#, python-format
+msgid "Quota exceeded for %(pid)s, metadata property key or value too long"
+msgstr ""
+"Превышена квота для %(pid)s, слишком длинный ключ или значение свойства "
+"метаданных"
+
+#: cinder/compute/api.py:257
+msgid "Cannot run any more instances of this type."
+msgstr "Невозможно запустить дополнительные копии этого типа."
+
+#: cinder/compute/api.py:259
+#, python-format
+msgid "Can only run %s more instances of this type."
+msgstr "Можно запустить ещё только %s копий этого типа."
+
+#: cinder/compute/api.py:261
+#, python-format
+msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. "
+msgstr "Превышена квота для %(pid)s, попытка запустить %(min_count)s копий. "
+
+#: cinder/compute/api.py:310
+msgid "Creating a raw instance"
+msgstr "Создание необработанной копии"
+
+#: cinder/compute/api.py:312
+#, python-format
+msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s"
+msgstr ""
+
+#: cinder/compute/api.py:383
+#, python-format
+msgid "Going to run %s instances..."
+msgstr "Выполняется запуск %s копий..."
+
+#: cinder/compute/api.py:447
+#, python-format
+msgid "bdm %s"
+msgstr "bdm %s"
+
+#: cinder/compute/api.py:474
+#, python-format
+msgid "block_device_mapping %s"
+msgstr "block_device_mapping %s"
+
+#: cinder/compute/api.py:591
+#, python-format
+msgid "Sending create to scheduler for %(pid)s/%(uid)s's"
+msgstr "Отправка операции создания в планировщик для %(pid)s/%(uid)s's"
+
+#: cinder/compute/api.py:871
+msgid "Going to try to soft delete instance"
+msgstr "Попытка мягкого удаления копии"
+
+#: cinder/compute/api.py:891
+msgid "No host for instance, deleting immediately"
+msgstr "Отсутствует узел для копии, немедленное удаление"
+
+#: cinder/compute/api.py:939
+msgid "Going to try to terminate instance"
+msgstr "Попытка завершить работу копии"
+
+#: cinder/compute/api.py:977
+msgid "Going to try to stop instance"
+msgstr "Попытка остановить копию"
+
+#: cinder/compute/api.py:996
+msgid "Going to try to start instance"
+msgstr "Попытка запустить копию"
+
+#: cinder/compute/api.py:1000
+#, python-format
+msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s"
+msgstr "Копия %(instance_uuid)s не остановлена. (%(vm_state)s"
+
+#: cinder/compute/api.py:1071 cinder/volume/api.py:173
+#: cinder/volume/volume_types.py:64
+#, python-format
+msgid "Searching by: %s"
+msgstr "Поиск по: %s"
+
+#: cinder/compute/api.py:1201
+#, python-format
+msgid "Image type not recognized %s"
+msgstr "Тип образа не распознан %s"
+
+#: cinder/compute/api.py:1369
+msgid "flavor_id is None. Assuming migration."
+msgstr ""
+
+#: cinder/compute/api.py:1377
+#, python-format
+msgid ""
+"Old instance type %(current_instance_type_name)s, new instance type "
+"%(new_instance_type_name)s"
+msgstr ""
+"Прежний тип копии %(current_instance_type_name)s, новый тип копии "
+"%(new_instance_type_name)s"
+
+#: cinder/compute/api.py:1644
+#, python-format
+msgid "multiple fixedips exist, using the first: %s"
+msgstr ""
+
+#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65
+msgid "create arguments must be positive integers"
+msgstr ""
+
+#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr "Ошибка БД: %s"
+
+#: cinder/compute/instance_types.py:86
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr "Тип копии %s не найден для удаления"
+
+#: cinder/compute/manager.py:138
+#, python-format
+msgid "check_instance_lock: decorating: |%s|"
+msgstr "check_instance_lock: декорирование: |%s|"
+
+#: cinder/compute/manager.py:140
+#, python-format
+msgid ""
+"check_instance_lock: arguments: |%(self)s| |%(context)s| "
+"|%(instance_uuid)s|"
+msgstr ""
+"check_instance_lock: аргументы: |%(self)s| |%(context)s| "
+"|%(instance_uuid)s|"
+
+#: cinder/compute/manager.py:144
+#, python-format
+msgid "check_instance_lock: locked: |%s|"
+msgstr "check_instance_lock: заблокирован: |%s|"
+
+#: cinder/compute/manager.py:146
+#, python-format
+msgid "check_instance_lock: admin: |%s|"
+msgstr "check_instance_lock: администратор: |%s|"
+
+#: cinder/compute/manager.py:151
+#, python-format
+msgid "check_instance_lock: executing: |%s|"
+msgstr "check_instance_lock: исполнение: |%s|"
+
+#: cinder/compute/manager.py:155
+#, python-format
+msgid "check_instance_lock: not executing |%s|"
+msgstr ""
+
+#: cinder/compute/manager.py:201
+#, python-format
+msgid "Unable to load the virtualization driver: %s"
+msgstr "Невозможно загрузить драйвер виртуализации: %s"
"Невозможно загрузить драйвер виртуализации: %s" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "Драйвер гипервизора не поддерживает правила брандмауэра" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, fuzzy, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "исключение завершает работу копии %(instance_uuid)s" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "Копия %s не найдена." + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "Копия уже была создана" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" +"Образ '%(image_id)s' размером %(size_bytes)d, превышает exceeded " +"instance_type допустимый размер %(allowed_size_bytes)d" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." +msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "Пропуск DiskNotFound: %s" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "завершение работы bdm %s" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "%s. Установка состояния копии vm_state на ERROR" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." 
+msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "Обновление сборки %s" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "Перезагрузка копии %s" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" +"попытка перезагрузки не выполняемой копии: %(instance_uuid)s (состояние: " +"%(state)s ожидалось: %(running)s)" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "копия %s: выполнение снимка" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" +"попытка создания снимка не выполняемой копии: %(instance_uuid)s " +"(состояние: %(state)s ожидалось: %(running)s)" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "Найдено %(num_images)d образов (ротация: %(rotation)d)" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "Удаление образа %s" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "Невозможно назначить пароль администратора. Копия %s не выполняется" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "Копия %s: Назначение административного пароля" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." +msgstr "set_admin_password не реализован в этой драйвере." + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "Ошибка назначения пароля администратора" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "копия %(instance_uuid)s: обновление агента до %(url)s" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "назначение совпадает с источником!" 
+ +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "копия %s: перемещение" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "копия %s: приостановление" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "копия %s: снятие с приостановления" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "копия %s: получение диагностики" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "копия %s: приостановление" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "копия %s: возобновление" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "копия %s: блокирование" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "копия %s: разблокирование" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "копия %s: получение заблокированного состояния" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "копия %s: сброс сети" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "копия %s: получение консоли vnc" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "копия %(instance_uuid)s: присоединение тома %(volume_id)s к %(mountpoint)s" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "копия %(instance_uuid)s: ошибка присоединения %(mountpoint)s, удаление" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "Отсоединение тома от неизвестной копии %s" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" +"Создание временного файла %s для оповещения других узлов сети compute о " +"необходимости присоединения того же хранилища." + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "plug_vifs() ошибка %(cnt)d. До %(max_retry)d попыток для %(hostname)s." + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "Запущено post_live_migration().."
+ +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" +"Вы можете увидеть ошибку \"libvirt: ошибка QEMU: Домен не найден: " +"отсутствует домен с соответствующим именем.\" Эта ошибка может быть " +"безопасно пропущена." + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "Обновление кэша использования полосы пропускания" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "Обновление состояния узла" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" +"Найдено %(num_db_instances)s в базе данных и %(num_vm_instances)s в " +"гипервизоре." + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "FLAGS.reclaim_instance_interval <= 0, пропуск..." + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" +"Нераспознанное значение '%(action)s' для " +"FLAGS.running_deleted_instance_action" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "Добавление консоли" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "Выполнена попытка удаления несуществующей консоли %(console_id)s."
+ +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "Попытка удаления несуществующей консоли %(console_id)s." + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." +msgstr "Удаление консоли %(console_id)s." + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "Пересборка конфигурации xvp" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "Выполнение останова xvp" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "Запуск xvp" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "Ошибка запуска xvp: %s" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "Перезапуск xvp" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "xvp не выполняется..." + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "Удаление токена с истёкшим сроком: (%s)" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "Полученный токен: %(token)s, %(token_dict)s)" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "Проверка токена: %(token)s, %(token_valid)s)" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "Нераспознанное значение read_deleted '%s'" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "python-migrate не установлен. Выход." + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "Ошибка соединения с SQL. Осталось попыток: %s." + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "столбец интерфейса не добавлен в таблицу сетей" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" +msgstr "Таблица |%s| не создана!"
+ +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "Столбец VIF не добавлен в таблицу fixed_ips" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "столбец приоритета не добавлен в таблицу сетей" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "ограничение внешнего ключа не может быть удалено" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "столбец выполнения не добавлен в таблицу копий" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "столбец выполнения не добавлен в таблицу compute_nodes" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance.
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "Не владелец образа" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "Ошибка загрузки %(image_location)s в %(image_path)s" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "Ошибка расшифрования %(image_location)s в %(image_path)s" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "Ошибка извлечения %(image_location)s в %(image_path)s" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "Ошибка выгрузки %(image_location)s в %(image_path)s" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "Ошибка дешифрирования личного ключа: %s" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "Ошибка дешифрирования вектора инициализации: %s" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "Ошибка дешифрирования файла образа %(image_file)s: %(err)s" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "В образе небезопасные имена файлов" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "Недопустимый mac для to_global_ipv6: %s" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "Недопустимый префикс для to_global_ipv6: %s" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "Недопустимый project_id для to_global_ipv6: %s" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." +msgstr "Этот драйвер поддерживает только записи типа 'a'." 
+ +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "Предпринята попытка удалить цепочку %s, которая не существует" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "Неизвестная цепочка: %r" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" +"Предпринята попытка удалить правило, которого там нет: %(chain)r %(rule)r" +" %(wrap)r %(top)r" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Запуск интерфейса VLAN %s" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Запуск интерфейса моста для %s" + +#: cinder/network/linux_net.py:1142 +#, python-format +msgid "Starting bridge %s " +msgstr "Запуск моста %s " + +#: cinder/network/linux_net.py:1149 +#, python-format +msgid "Done starting bridge %s" +msgstr "Запуск моста %s завершён" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "Ошибка отсоединения интерфейса шлюза '%s'" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "Фиксированный IP %(fixed_ip_id)s не найден" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "Интерфейс %(interface)s не найден" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "Адрес |%(address)s| не выделен" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "Адрес |%(address)s| не выделен вашему проекту |%(project)s|" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "Превышена квота для %s, попытка выделения адреса" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "Домен |%(domain)s| уже существует, изменение зоны на |%(av_zone)s|." + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
+msgstr "Домен |%(domain)s| уже существует, изменение проекта на |%(project)s|." + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "установка сетевого узла" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "исключение сетевых распределений для копии |%s|" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" +"instance-dns-zone является |%(domain)s|, который располагается в " +"доступной зоне |%(zone)s|. Копия |%(instance)s| расположена в зоне " +"|%(zone2)s|. Запись DNS не будет создана." + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "Арендованный IP |%(address)s|" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "Присвоенный IP |%(address)s|" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "cidr уже используется" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" +"запрошенная cidr (%(cidr)s) конфликтует с существующей супер-сетью " +"(%(super)s)" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" +"запрошенная cidr (%(cidr)s) конфликтует с существующей, меньшей cidr " +"(%(smaller)s)" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "Сеть уже существует!" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "Сеть должна быть исключена из проекта %s перед удалением" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" +"Сетевой диапазон недостаточен для соответствия %(num_networks)s. 
Размер " +"сети %(network_size)s" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "Этот драйвер поддерживает только тип 'a'" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "Объект Quantum не найден: %s" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "Серверная %(status_code)s ошибка: %(data)s" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "Невозможно подключиться к серверу. Принята ошибка: %s" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." 
+msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "сетевые распределения для копии %s" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "Сервер возвратил ошибку: %s" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "Ошибка создания сетевой записи" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "Нет сети с net_id = %s" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, fuzzy, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "исключение сетевых распределений для копии |%s|" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" +"Подключение интерфейса %(interface_id)s к сети %(net_id)s для " +"%(tenant_id)s" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "Удаление порта %(port_id)s в сети %(net_id)s для %(tenant_id)s" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "%s не в допустимых приоритетах" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" +"Неполадка '%(e)s', попытка отправить в систему уведомлений. " +"Нагрузка=%(payload)s" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Возврат исключения %s вызывающему" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "неизвлечённый контекст: %s" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "получено %s" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "не определен метод для сообщения: %s" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "Не определен метод для сообщения: %s" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "Выполнение асинхронного вызова %s ..." + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "Отправка оповещения на %s..." + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" +"Удалённая ошибка: %(exc_type)s %(value)s\n" +"%(traceback)s." + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "Повторное подключение к серверу AMQP на %(hostname)s:%(port)d" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "Подключение к серверу AMQP на %(hostname)s:%(port)d" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" +"Невозможно подключиться к серверу AMQP на %(hostname)s:%(port)d после " +"%(max_retries)d попыток: %(err_str)s" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" +"AMQP сервер на %(hostname)s:%(port)d недоступен: %(err_str)s. Повторная " +"попытка через %(sleep_time)d секунд." 
+ +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "Ошибка объявления потребителя для темы '%(topic)s': %(err_str)s" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "Ошибка получения сообщения из очереди: %s" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "Ошибка публикации сообщения в тему '%(topic)s': %(err_str)s" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "Невозможно подключиться к серверу AMQP: %s " + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" +msgstr "Выполняется ли соответствующая служба?" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "Невозможно найти другой compute" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "узел %(dest)s несовместим с исходным узлом %(src)s." + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined."
+msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "Попытка собрать %(num_instances)d копию(й)" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "Отфильтрованы %(hosts)s" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, fuzzy, python-format +msgid "Host filter passes for %(host)s" +msgstr "Отфильтрованы %(hosts)s" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "Принято служебное обновление для %(service_name)s от %(host)s." + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "host_manager реализован только для 'compute'" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "Нет службы для compute ID %s" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "Ошибка schedule_%(method)s: %(ex)s" + +#: cinder/scheduler/manager.py:159 +#, fuzzy, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgstr "Копия %(instance_uuid)s не остановлена. 
(%(vm_state)s" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "Невозможно декодировать параметры расписания: '%(e)s'" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "Имитация выполнения команды (субпроцесс): %s" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "Имитация команды привела к исключению %s" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "Ответ на имитацию команды в stdout='%(stdout)s' stderr='%(stderr)s'" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "Выполняемые копии: %s" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "После завершения работы копий: %s" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "Внутренняя ошибка" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "После принудительного завершения работы копий: %s" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "IPv4" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "IPv6" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "проект" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." +msgstr "Невозможно подтвердить идентификатор экспортированного тома:%s." 
+ +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "Заданные данные: %s" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "Итоговые данные: %s" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "Создание файлов в %s для имитации гостевого агента" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "Удаление файлов имитации гостевого агента в %s" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "Превышена квота: code=%(code)s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "_create: %s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "_delete: %s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "_get: %s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "_get_all: %s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "test_snapshot_create: param=%s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "test_snapshot_create: resp_dict=%s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "test_snapshot_create_force: param=%s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "test_snapshot_create_force: resp_dict=%s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "test_snapshot_show: resp=%s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "test_snapshot_detail: resp_dict=%s" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" +"%(message)s\n" +"Код состояния: %(_status)s\n" +"Тело: %(_body)s" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "Ошибка аутентификации" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "Ошибка авторизации" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "Объект не найден" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "Выполнение %(method)s на %(relative_url)s" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "Тело: %s" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "%(auth_uri)s => код %(http_status)s" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "%(relative_uri)s => код %(http_status)s" + +#:
cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "Непредвиденный код состояния" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "Декодирование JSON: %s" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "Получено %s" + +#: cinder/virt/connection.py:85 +msgid "Failed to open connection to the hypervisor" +msgstr "Ошибка открытия соединения с гипервизором" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "Запись Compute_service создана для %s " + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "Запись Compute_service обновлена для %s " + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "Правила брандмауэра поставщика обновлены" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "Добавление правила группы безопасности: %r" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "Добавление правила поставщика: %s" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "Ошибка анализа 'qemu-img info'."
+ +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "Преобразовано в raw, но текущий формат теперь %s" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "" +"Необходимо указать vmwareapi_host_ip, vmwareapi_host_username и " +"vmwareapi_host_password для использования connection_type=vmwareapi" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "В vmwareapi:_create_session, принято это исключение: %s" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "В vmwareapi:_call_method, принято это исключение: %s" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "Состояние задачи [%(task_name)s] %(task_ref)s: готово" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "Состояние задачи [%(task_name)s] %(task_ref)s: ошибка %(error_info)s" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "В vmwareapi:_poll_task, принята эта ошибка %s" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" +"Необходимо указать xenapi_connection_url, xenapi_connection_username " +"(необязательно) и xenapi_connection_password для использования " +"connection_type=xenapi" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "Запуск узла на XenServer не поддерживается." + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" +"Невозможно выполнить вход в XenAPI (недостаточно свободного места на " +"Dom0?)" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "Исключение: %s" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist."
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "Домен %s не существует" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "копия %(instance_name)s: удаление файлов копии %(target)s" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: 
cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "копия %s: Создание образа" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" +"Невозможно получить количество ЦПУ, так как эта возможность не " +"реализована для этой платформы. Эту ошибку можно безопасно пропустить." + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." 
+msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "нет настроенного совместимого обработчика образа" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "неизвестный обработчик образа диска: %s" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "образ уже присоединён" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "Ошибка монтирования файловой системы: %s" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "Ошибка удаления контейнера: %s" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." +msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "неподдерживаемый раздел: %s" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "Невозможно прикрепить образ для замыкания: %s" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "разделы не найдена" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "Нет свободных устройств nbd" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "ошибка qemu-nbd: %s" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "Подключение к libvirt: %s" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "Неполадка с подключением к libvirt" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy, python-format +msgid "Instance soft rebooted successfully." +msgstr "снимок %s: создание завершено" + +#: cinder/virt/libvirt/connection.py:696 +#, fuzzy +msgid "Failed to soft reboot instance." 
+msgstr "Ошибка перезагрузки копии" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" +"Найдены %(migration_count)d неподтверждённых перемещений, старше " +"%(confirm_window)d секунд" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "Автоматические подтверждение перемещения %d" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +#, fuzzy +msgid "Guest does not have a console available" +msgstr "Пользователь не имеет административных привилегий" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "Создание изображения" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "block_device_list %s" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" +"Ошибка libvirt во время поиска %(instance_name)s: [Код ошибки " +"%(error_code)s] %(ex)s" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "устаревшая версия libvirt (не поддерживается getVersion)" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "'' должно быть 1, но %d\n" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +#, fuzzy, python-format +msgid "Instance running successfully." +msgstr "Копия %s: выполнение" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "" +"Модуль Libvirt не может быть загружен. NWFilterFirewall не будет работать" +" надлежащим образом." + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "nwfilter(%(instance_filter_name)s) для%(name)s не найден." + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "Брандмауэр iptables: Настройка базовой фильтрации" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: 
cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! " +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "Невозможно найти открытый порт" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "Ошибка отсоединения vif копии '%s'" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "iSCSI-устройство не найдено в %s" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. " +"Try number: %(tries)s" +msgstr "" +"Том ISCSI не найден в: %(mount_device)s. Будет выполнена повторная " +"проверка и попытка. 
Номер попытки: %(tries)s" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "Найден узел iSCSI %(mount_device)s (после %(tries)s повторных проверок)" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "Свойство %(attr)s не назначено для управляемого объекта %(objName)s" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "Отсутствуют зарегистрированные ВМ" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "Завершение недопустимого или уже завершённого сеанса: %s" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "Сеанс неисправен" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "Недопустимый сеанс" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr " Отсутствуют зарегистрированные виртуальные машины" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." +msgstr "Невозможно импортировать suds."
+ +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "Необходимо задать vmwareapi_wsdl_loc" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "Исключение в %s " + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "Получение перечня копий" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "Всего %s копий" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "Создание ВМ с именем %s на узле ESX" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "Создана ВМ с именем %s на узле ESX" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "Включение питания на копии ВМ %s" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "Питание включено на копии ВМ %s" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "Создание снимка копии ВМ %s " + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "Создан снимок копии ВМ %s " + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#:
cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "Выгрузка образа %s" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "Образ %s выгружен" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" +msgstr "Удаление временного файла vmdk %s" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "Временный файл vmdk %s удалён" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "копия не включена" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "Перезагрузка гостевой ОС ВМ %s" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "Выполнена перезагрузка гостевой ОС ВМ %s" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "Выполнение принудительной перезагрузки ВМ %s" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "копия - %s отсутствует" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "Выключение питания ВМ %s" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "Питание ВМ %s выключено" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "Отмена регистрации ВМ %s" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "Регистрация ВМ %s отменена" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "Приостановка ВМ %s " + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "ВМ %s приостановлена " + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "Питание копии выключено, поэтому она не может быть приостановлена." + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "Возобновление работы ВМ %s" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "Работа ВМ %s возобновлена " + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "копия не в приостановленном состоянии" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "Создание каталога по пути %s" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "Создан каталог по пути %s" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "Получение размера образа для образа %s" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "Получен размер образа %(size)s для образа %(image)s" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake не имеет реализации для %s" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "Выполняется вызов %(localname)s %(impl)s" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "Выполняется вызов getter %s" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake не имеет реализации для %s или был вызван с " +"неправильным числом аргументов" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "Невозможно получить SR для этого узла: %s" + +#: cinder/virt/xenapi/host.py:169 +#, fuzzy, python-format +msgid "Unable to get updated status" +msgstr "Невозможно получить обновлённое состояние: %s" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an 
error: %(e)s." +msgstr "Вызов %(method)s возвратил ошибку: %(e)s." + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "Найдена не уникальная сеть для name_label %s" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "Найдена не уникальная сеть для моста %s" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "Не найдена сеть для моста %s" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, fuzzy, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "Невозможно найти узел для копии %s" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "Невозможно использовать глобальные полномочия %(role_id)s" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +#, fuzzy, python-format +msgid "Created VM" +msgstr "_создать: %s" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "VBD не найдено для копии %s" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, fuzzy, python-format +msgid "VBD %s already detached" +msgstr "группа %s уже существует" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Невозможно отсоединить VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Невозможно ликвидировать VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, fuzzy, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "Создана VBD %(vbd_ref)s для ВМ %(vm_ref)s, VDI %(vdi_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "Создана VBD %(vbd_ref)s для ВМ %(vm_ref)s, VDI %(vdi_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "Невозможно ликвидировать VDI %s" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" +"Создан VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) " +"на %(sr_ref)s." 
+ +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, fuzzy, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "Отсутствует первичный VDI для%(vm_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, fuzzy, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "Снимок ВМ %(vm_ref)s с меткой '%(label)s'..." + +#: cinder/virt/xenapi/vm_utils.py:392 +#, fuzzy, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "Создан снимок %(template_vm_ref)s из ВМ %(vm_ref)s." + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "Запрос xapi для выгрузки %(vdi_uuids)s в качестве ID %(image_id)s" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "Запрос xapi на приём образа vhd %(image)s" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" +"xapi 'download_vhd' возвратил VDI типа '%(vdi_type)s' с UUID " +"'%(vdi_uuid)s'" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" +"Размер образа %(size_bytes)d превышает допустимый instance_type размер " +"%(allowed_size_bytes)d" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, fuzzy, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "Получение образа %(image)s" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" +"Превышен размер ядра/Ramdisk образа: %(vdi_size)d байт, макс. 
" +"%(max_size)d байт" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "Копирование VDI %s в /boot/guest на dom0" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "Ядро/Ramdisk VDI %s ликвидирован" + +#: cinder/virt/xenapi/vm_utils.py:895 +#, fuzzy +msgid "Failed to fetch glance image" +msgstr "Ошибка перезагрузки копии" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "Обнаружен %(image_type_str)s формат для образа %(image_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Поиск vdi %s для ядра PV" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "Неизвестный формат образа %(disk_image_type)s" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "VDI %s до сих пор доступен" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of %(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "Повторная проверка SR %s" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "Невозможно найти SR типа содержимого ISO" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "ISO: поиск SR %(sr_rec)s" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "ISO: не содержимое iso типа" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "ISO: SR с локальной PBD" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "Время ожидания при создании устройства %s" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "Подсоединение VBD %s ... " + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "Подсоединение VBD %s выполнено." + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "VBD %(vbd_ref)s подсоединено как %(orig_dev)s" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" +"VBD %(vbd_ref)s подсоединено в неправильный dev, изменение назначения на " +"%(dev)s" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "Ликвидирование VBD для VDI %s ... " + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "Ликвидирование VBD для VDI %s завершено." + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "Найдено ядро Xen %s" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." +msgstr "Ядро Xen не найдено. Загрузка HVM." + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "Разделы:" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr " %(num)s: %(ptype)s %(size)d секторов" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" +"Запись таблицы разделов %(primary_first)d %(primary_last)d в " +"%(dev_path)s..." + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "Запись таблицы разделов %s выполнена." + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." 
+" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "Средства XenServer не установлены в этот образ" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" +"Ошибка присоединения файловой системы (ожидаемо для копий не на базе " +"linux): %s" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, fuzzy, python-format +msgid "Updating progress to %(progress)d" +msgstr "Обновление выполнения копии '%(instance_uuid)s' до %(progress)d" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" +"Предпринята попытка включения несуществующей копии, копии с неполадками с" +" идентификатором %s" + +#: cinder/virt/xenapi/vmops.py:233 +#, fuzzy, python-format +msgid "Starting instance" +msgstr "Запуск копии %s" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "Удаление ядра/ramdisk файлов из dom0" + +#: cinder/virt/xenapi/vmops.py:358 +#, fuzzy +msgid "Failed to spawn, rolling back" +msgstr "Ошибка обновления тома в базе данных" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +#, fuzzy, python-format +msgid "Auto configuring disk, attempting to resize partition..." +msgstr "" +"Автоматическая настройка диска для копии %(instance_uuid)s, выполняется " +"попытка изменения размера раздела..." + +#: cinder/virt/xenapi/vmops.py:515 +#, fuzzy, python-format +msgid "Invalid value for injected_files: %r" +msgstr "Недопустимое значение для injected_files: '%s'" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "Настройка пароля администратора" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "Восстановление сети" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +#, fuzzy +msgid "Starting VM" +msgstr "Выполнение перезагрузки xvp" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "Запрос версии агента" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "Версия агента копии: %s" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "Обновление агента до %s" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "opaque_ref не может быть определён для '%s'." 
+ +#: cinder/virt/xenapi/vmops.py:670 +#, fuzzy, python-format +msgid "Finished snapshot and upload for VM" +msgstr "Готовый снимок и выгрузка для ВМ %s" + +#: cinder/virt/xenapi/vmops.py:677 +#, fuzzy, python-format +msgid "Starting snapshot for VM" +msgstr "Запуск снимка для ВМ %s" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "Невозможно найти узел для копии %s" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "Ошибка перемещения vhd на новый узел" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "Изменение размера VDI %(cow_uuid)s с %(old_gb)dГБ до %(new_gb)dГБ" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "Изменение размера VDI %(vdi_uuid)s с %(old_gb)dГБ до %(new_gb)dГБ" + +#: cinder/virt/xenapi/vmops.py:901 +#, fuzzy, python-format +msgid "Resize complete" +msgstr "Изменение размера копии %s завершено" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "Ошибка запроса версии агента: %(resp)r" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "Ошибка обновления агента: %(resp)r" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "Ошибка обмена ключами: %(resp)r" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "Ошибка обновления пароля: %(resp)r" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "Невозможно найти VBD для ВМ" + +#: cinder/virt/xenapi/vmops.py:1097 +#, fuzzy, python-format +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" +"Копия %(instance_uuid)s использует RAW или VHD, пропуск ядра и удаление " +"ramdisk" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "копия содержит ядро или ramdisk, но не оба" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "файлы ядра/ramdisk удалены" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +#, fuzzy +msgid "Destroying VM" +msgstr "Выполнение перезагрузки xvp" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "ВМ отсутствует, пропуск ликвидации..." 
+ +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "Копия уже в режиме восстановления: %s" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +#, fuzzy, python-format +msgid "Automatically hard rebooting" +msgstr "Автоматическая принудительная перезагрузка %d" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, fuzzy, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "Завершение работы ВМ для копии %(instance_uuid)s" + +#: cinder/virt/xenapi/vmops.py:1379 +#, fuzzy, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "Копия %(instance_id)s не найдена" + +#: cinder/virt/xenapi/vmops.py:1383 +#, fuzzy, python-format +msgid "In ERROR state" +msgstr "Ошибка БД: %s" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "Невозможно получить сведения о пропускной способности." + +#: cinder/virt/xenapi/vmops.py:1469 +#, fuzzy +msgid "Injecting network info to xenstore" +msgstr "установка сетевого узла" + +#: cinder/virt/xenapi/vmops.py:1483 +#, fuzzy +msgid "Creating vifs" +msgstr "Создание изображения" + +#: cinder/virt/xenapi/vmops.py:1492 +#, fuzzy, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "Создание VIF для ВМ %(vm_ref)s, сеть %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1495 +#, fuzzy, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "Создание VIF для ВМ %(vm_ref)s, сеть %(network_ref)s." + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, fuzzy, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" +"Вызов агентом %(method)s возвратил недопустимый ответ: %(ret)r. VM " +"id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" + +#: cinder/virt/xenapi/vmops.py:1566 +#, fuzzy, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" +"ТАЙМ-АУТ: Срок вызова %(method)s истёк. VM id=%(instance_uuid)s; " +"args=%(args)r" + +#: cinder/virt/xenapi/vmops.py:1570 +#, fuzzy, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" +"НЕ РЕАЛИЗОВАНО: Вызов %(method)s не поддерживается агентом. VM " +"id=%(instance_uuid)s; args=%(args)r" + +#: cinder/virt/xenapi/vmops.py:1575 +#, fuzzy, python-format +msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgstr "Вызов %(method)s возвратил ошибку: %(e)s." 
+ +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "Ошибка OpenSSL: %s" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "создание sr в volume_utils" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "тип = %s" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "наименование = %s" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "Создано %(label)s как %(sr_ref)s." + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "Невозможно создать репозиторий хранилища" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "внедрение sr в volume_utils" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "Внедрён %(label)s в качестве %(sr_ref)s." + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "Создание pbd для SR" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "Подключение SR" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "Невозможно внедрить репозиторий хранения" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "Невозможно получить SR с использованием uuid" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "Забывание SR %s..." + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "Невозможно забыть репозиторий хранения" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "Внедрение %s..." 
+ +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "Невозможно найти SR из VBD %s" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "Пропуск исключения %(exc)s при получении PBD для %(sr_ref)s" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "Пропуск исключения %(exc)s при отсоединении PBD %(pbd)s" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "Невозможно внедрить VDI на SR %s" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "Невозможно получить запись VDI %s на" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "Невозможно внедрить VDI для SR %s" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "Ошибка поиска vdis в SR %s" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "Ошибка поиска vbd для vdi %s" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "Невозможно получить сведения назначения %(data)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "Точка подключения не может быть переведена: %s" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "Создание SR %s" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "Невозможно создать SR" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "Невозможно получить запись SR" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "Внедрение SR %s" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. No need to introduce" +msgstr "SR найдено в базе данных xapi. 
Нет необходимости во внедрении" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "Невозможно внедрить SR" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "Проверка SR %s" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "SR %s не найден в базе данных xapi" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "Невозможно забыть SR" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Невозможно создать VDI на SR %(sr_ref)s для копии %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Невозможно использовать SR %(sr_ref)s для копии %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Невозможно присоединить том к копии %s" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "Точка подключения %(mountpoint)s присоединена к копии %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Невозможно найти том %s" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Невозможно отсоединить том %s" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "Невозможно ликвидировать vbd %s" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "Ошибка очистки SR %s" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "Точка подключения %(mountpoint)s отсоединена от копии %(instance_name)s" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "Ошибка при установлении соединения (handshake): %s" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "Недопустимый запрос: %s" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "Запрос: %s" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "Запрос сделан с отсутствующим токеном: %s" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "Запрос сделан с недопустимым токеном: %s" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "Непредвиденная ошибка: %s" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "Запуск узла cinder-xvpvncproxy (версия %s)" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "Превышена квота для %(pid)s, попытка создания тома %(size)sG" + +#: 
cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "Состояние тома должно быть доступно" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "Восстановление после неудавшегося выполнения. Попытка номер %s" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "группа томов %s не существует" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" +"Невозможно подтвердить идентификатор экспортированного " +"тома:%(volume_id)s." 
+ +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "rbd не имеет пула %s" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog не выполняется: %s" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "Sheepdog не выполняется" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "Повторное экспортирование %s томов" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "том %s: пропуск экспортирования" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "том %s: создание" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "том %(vol_name)s: создание lv объёмом %(vol_size)sG" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "том %s: создание экспортирования" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "том %s: создание завершено" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "Том до сих пор присоединён" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "том %s: удаление экспортирования" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "том %s: удаление" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "том %s: том занят" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "том %s: удаление завершено" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "снимок %s: создание" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "снимок %(snap_name)s: создание" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "снимок %s: создание завершено" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "снимок %s: удаление" + +#: cinder/volume/manager.py:214 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "снимок %s: создание завершено" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "снимок %s: удаление выполнено" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "Проверка возможностей тома" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "Обнаружены новые возможности: %s" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "Очистить возможности" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "Принято уведомление {%s}" + +#: cinder/volume/netapp.py:79 +#, fuzzy, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "Образ %(image_id)s недопустим: %(reason)s" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: 
cinder/volume/netapp.py:128 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Переподключено к очереди" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +#, fuzzy +msgid "Failed to provision dataset member" +msgstr "Ошибка обновления базы данных" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "Ошибка поиска vbd для vdi %s" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "Невозможно найти экспортирование iSCSI для тома %s" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/netapp.py:614 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/netapp.py:620 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/netapp.py:625 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "Задайте san_password или san_private_key" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "san_ip должен быть назначен" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "LUID не найден для %(zfs_poolname)s. Вывод=%(out)s" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "Команда CLIQ возвратила %s" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" +"Неправильный ответ на команду CLIQ %(verb)s %(cliq_args)s. " +"Результат=%(out)s" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "Ошибка выполнения команды CLIQ %(verb)s %(cliq_args)s. Результат=%(out)s" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" +"Непредвиденное количество виртуальных ip для кластера %(cluster_name)s. 
" +"Результат=%(_xml)s" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "Сведения о томе: %(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "local_path не поддерживается" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "Невозможно определить проект для тома %s, невозможно экспортировать" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "Ошибка создания sr %s...продолжение" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "Ошибка создания" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "Ошибка обновления базы данных" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "Ошибка внедрения sr %s...продолжение" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "XenSMDriver необходимо xenapi соединение" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "Ошибка начала сеанса" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "Том будет создан во внутреннем интерфейсе - %d" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "Ошибка обновления тома в базе данных" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "Невозможно создать том" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "Ошибка удаления vdi" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "Ошибка удаления тома в базе данных" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr 
"Ошибка поиска тома в базе данных" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "Ошибка поиска внутреннего интерфейса в базе данных" + +#: cinder/volume/nexenta/__init__.py:27 +#, fuzzy, python-format +msgid "Nexenta SA returned the error" +msgstr "Сервер возвратил ошибку: %s" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, fuzzy, python-format +msgid "Sending JSON data: %s" +msgstr "Заданные данные: %s" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "ответ %s" + +#: cinder/volume/nexenta/volume.py:96 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "том группы %s не существует" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." +msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "Параметры доступы Cinder не заданы." + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "Виртуальный массив для хранения данных %(id)d не найден." + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "Виртуальный массив для хранения данных %(name)s не найден." 
+ +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "Тело не предоставлено" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "Создать VSA %(display_name)s типа %(vc_type)s" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "Удалить VSA с идентификатором: %s" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "Ассоциировать адрес %(ip)s с VSA %(id)s" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "Исключить адрес для VSA %(id)s" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "%(obj)s с идентификатором %(id)s не найден" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" +#~ "%(obj)s с идентификатором %(id)s относится " +#~ "к VSA %(own_vsa_id)s и не VSA " +#~ "%(vsa_id)s." + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "Index. vsa_id=%(vsa_id)s" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "Создать том %(size)s ГБ из VSA ID %(vsa_id)s" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "Обновить %(obj)s с идентификатором: %(id)s, изменения: %(changes)s" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "Принудительное удаление вм %(instance_uuid)s, даже если она удалена" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" +#~ "Копия %(instance_uuid)s не существует в " +#~ "БД, но будет выполнено выключение " +#~ "используя особый контекст" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "попытка ликвидации уже ликвидированной копии: %s" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." 
+#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "Невозможно перейти на предыдущую версию без потери данных" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" +#~ "Сервер AMQP на %(fl_host)s:%(fl_port)d " +#~ "недоступен: %(e)s. Очередная попытка через " +#~ "%(fl_intv)d секунд." + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" +#~ "Невозможно подключиться к серверу AMQP " +#~ "после выполнения %(tries)d попыток. Завершение" +#~ " работы." + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "Ошибка получения сообщения из очереди: %s" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "тема %s" + +#~ msgid "message %s" +#~ msgstr "сообщение %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "_filter_hosts: %(request_spec)s" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. Skip" +#~ msgstr "На узле %s недостаточно свободного места. Пропуск" + +#~ msgid "Filter hosts: %s" +#~ msgstr "Фильтр узлов: %s" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "Выбрано макс. количество узлов (%d)" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "Том обеспечения %(name)s размером %(size)s ГБ на узле %(host)s" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "%(i)d: Том %(name)s" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "Ошибка создания томов" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "Том не-VSA %d" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "Ошибка создания тома" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "Возможность для тома, объёмом %(size)s, не выбрана" + +#~ msgid "Host %s:" +#~ msgstr "Узел %s:" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" +#~ "\tДиск %(qosgrp)-25s: всего %(total)2s, " +#~ "занято %(used)2s, свободно %(free)2s. 
" +#~ "Доступный объём %(avail)-5s" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "\t LeastUsedHost: Наилучший узел: %(best_host)s. (занято %(min_used)s)" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" +#~ "\t MostAvailCap: Наилучший узел: " +#~ "%(best_host)s. (доступно %(max_avail)s %(type_str)s)" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "" + +#~ msgid "Declaring queue %s" +#~ msgstr "Объявление очереди %s" + +#~ msgid "Declaring exchange %s" +#~ msgstr "Объявление точки обмена %s" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "Получение из %(queue)s: %(message)s" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "Тест: user_data = %s" + +#~ msgid "_create: param=%s" +#~ msgstr "_create: param=%s" + +#~ msgid "Host %s" +#~ msgstr "Узел %s" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "Проверка: обеспечение vol %(name)s на узле %(host)s" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "\t vol=%(vol)s" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "Тест: запрос обновления VSA: vsa_id=%(vsa_id)s values=%(values)s" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "Тест: Создание тома: %s" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "Тест: Том получает запрос: id=%(volume_id)s" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "Тест: Запрос обновления тома: id=%(volume_id)s значения=%(values)s" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "Тест: Том получает: id=%(volume_id)s" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "Состояние задачи [%(name)s] %(task)s: готово %(result)s" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "Состояние задачи [%(name)s] %(task)s: %(status)s %(error_info)s" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "Невозможно прочитать консоль LXC" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" +#~ "в xml...\n" +#~ ":%s " + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! 
instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "Создана ВМ %s..." + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "Создана ВМ %(instance_name)s как %(vm_ref)s." + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "Создание особого CDROM VBD для ВМ %(vm_ref)s, VDI %(vdi_ref)s ... " + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" +#~ "Создана VBD на основе CDROM %(vbd_ref)s" +#~ " для ВМ %(vm_ref)s, VDI %(vdi_ref)s." + +#~ msgid "Image Type: %s" +#~ msgstr "Тип образа: %s" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "ISO: Найден sr, возможно содержащий образ ISO" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "Создание VBD для VDI %s ... " + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "Создание VBD для VDI %s выполнено." + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "Пропуск XenAPI.Failure в VBD.unplug: %s" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "Пропуск XenAPI.Failure %s" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "Запуск ВМ %s..." + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "Копия %s: ожидание запуска" + +#~ msgid "Resources to remove:%s" +#~ msgstr "Ресурсы для удаления:%s" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "Пропуск ликвидации VDI для %s" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "Ликвидирование VDI для копии %(instance_uuid)s" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "Копия %(instance_uuid)s ВМ ликвидирована" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "Ликвидирование ВМ для копии %(instance_uuid)s" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "создание vif(s) для вм: |%s|" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "Создан VIF %(vif_ref)s для ВМ %(vm_ref)s, сеть %(network_ref)s." 
+ +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" +#~ "Вызов %(method)s возвратил ошибку: %(e)s. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "Создание VBD для ВМ %(vm_ref)s, VDI %(vdi_ref)s ... " + +#~ msgid "Error destroying VDI" +#~ msgstr "Ошибка ликвидации VDI" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "\tТом %s не является VSA томом" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "\tСоздание тома FE VSA %s - ничего не выполнять" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "Ошибка VSA BE create_volume для %s" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "VSA BE create_volume для %s выполнено" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "\tУдаление FE VSA тома %s - ничего не выполнять" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "Ошибка VSA BE delete_volume для %s" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "VSA BE delete_volume для %s выполнено" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "\tТом FE VSA %s создать экспортирование - ничего не выполнять" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "\tТом FE VSA %s удалить экспортирование - ничего не выполнять" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "Ошибка приёма сведений QoS" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "*** Экспериментальный код VSA ***" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "Запрошенное количество VC (%d) избыточно. Назначение по умолчанию" + +#~ msgid "Creating VSA: %s" +#~ msgstr "Создание VSA: %s" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Создать " +#~ "том %(vol_name)s, %(vol_size)d ГБ, тип " +#~ "%(vol_type_id)s" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "VSA ID %(vsa_id)d: Обновить состояние VSA на %(status)s" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "VSA ID %(vsa_id)d: Обновить вызов VSA" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "Добавление %(add_cnt)s VC в VSA %(vsa_name)s." + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "Удаление %(del_cnt)s VC из VSA %(vsa_name)s." + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "VSA ID %(vsa_id)s: Удаление %(direction)s тома %(vol_name)s" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "Невозможно удалить том %s" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" +#~ "VSA ID %(vsa_id)s: Принудительное удаление." 
+#~ " %(direction)s том %(vol_name)s" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "Выполнение завершения работы VSA ID %s" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "VSA ID %(vsa_id)s: Удалить копию %(name)s" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "Создать вызов полученный для VSA %s" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "Ошибка поиска VSA %(vsa_id)d" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "VSA ID %(vsa_id)s: Создан накопитель %(vol_id)s. Состояние %(status)s" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "Накопитель %(vol_name)s (%(vol_disp_name)s) в фазе создания - подождите" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/tl/LC_MESSAGES/nova.po b/cinder/locale/tl/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..206650836f5 --- /dev/null +++ b/cinder/locale/tl/LC_MESSAGES/nova.po @@ -0,0 +1,8200 @@ +# Tagalog translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2011-08-23 11:21+0000\n" +"Last-Translator: Thierry Carrez \n" +"Language-Team: Tagalog \n" +"Plural-Forms: nplurals=2; plural=(n != 1)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "Kailangan bang gumamit ng CA bawat proyekto?" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Merong hindi-inaasahang pagkakamali habang tumatakbo ang command." 
+ +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "walang paraan para sa mensahe: %s" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." 
+msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +msgid "Failed to terminate instance" +msgstr "" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." 
+msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." 
+msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." 
+msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." 
+msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" 
+msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." 
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be 
specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregate does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s to aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s from aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available."
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, python-format +msgid "Invalid mode: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance 
%s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +msgid "Going to try to soft delete instance" +msgstr "" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +msgid "Going to try to terminate instance" +msgstr "" + +#: cinder/compute/api.py:977 +msgid "Going to try to stop instance" +msgstr "" + +#: cinder/compute/api.py:996 +msgid "Going to try to start instance" +msgstr "" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." 
+msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." 
+msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87
+msgid "VIF column not added to fixed_ips table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97
+#, python-format
+msgid "join list for moving mac_addresses |%s|"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39
+#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60
+#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61
+#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99
+msgid "foreign key constraint couldn't be added"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58
+msgid "foreign key constraint couldn't be dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34
+msgid "priority column not added to networks table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41
+#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42
+#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56
+#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68
+msgid "foreign key constraint couldn't be removed"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34
+msgid "progress column not added to instances table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97
+#, python-format
+msgid ""
+"Could not cast flavorid to integer: %s. Set flavorid to an integer-like "
+"string to downgrade."
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69
+msgid "instance_info_caches tables not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41
+msgid "progress column not added to compute_nodes table"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76
+msgid "dns_domains table not dropped"
+msgstr ""
+
+#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60
+msgid "quota_classes table not dropped"
+msgstr ""
+
+#: cinder/image/glance.py:147
+msgid "Connection error contacting glance server, retrying"
+msgstr ""
+
+#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104
+msgid "Maximum attempts reached"
+msgstr ""
+
+#: cinder/image/glance.py:278
+#, python-format
+msgid "Creating image in Glance. Metadata passed in %s"
+msgstr ""
+
+#: cinder/image/glance.py:281
+#, python-format
+msgid "Metadata after formatting for Glance %s"
+msgstr ""
+
+#: cinder/image/glance.py:289
+#, python-format
+msgid "Metadata returned from Glance formatted for Base %s"
+msgstr ""
+
+#: cinder/image/glance.py:331 cinder/image/glance.py:335
+msgid "Not the image owner"
+msgstr ""
+
+#: cinder/image/glance.py:410
+#, python-format
+msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s"
+msgstr ""
+
+#: cinder/image/s3.py:309
+#, python-format
+msgid "Failed to download %(image_location)s to %(image_path)s"
+msgstr ""
+
+#: cinder/image/s3.py:328
+#, python-format
+msgid "Failed to decrypt %(image_location)s to %(image_path)s"
+msgstr ""
+
+#: cinder/image/s3.py:340
+#, python-format
+msgid "Failed to untar %(image_location)s to %(image_path)s"
+msgstr ""
+
+#: cinder/image/s3.py:353
+#, python-format
+msgid "Failed to upload %(image_location)s to %(image_path)s"
+msgstr ""
+
+#: cinder/image/s3.py:379
+#, python-format
+msgid "Failed to decrypt private key: %s"
+msgstr ""
+
+#: cinder/image/s3.py:387
+#, python-format
+msgid "Failed to decrypt initialization vector: %s"
+msgstr ""
+
+#: cinder/image/s3.py:398
+#, python-format
+msgid "Failed to decrypt image file %(image_file)s: %(err)s"
+msgstr ""
+
+#: cinder/image/s3.py:410
+msgid "Unsafe filenames in image"
+msgstr ""
+
+#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34
+#, python-format
+msgid "Bad mac for to_global_ipv6: %s"
+msgstr ""
+
+#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36
+#, python-format
+msgid "Bad prefix for to_global_ipv6: %s"
+msgstr ""
+
+#: cinder/ipv6/account_identifier.py:42
+#, python-format
+msgid "Bad project_id for to_global_ipv6: %s"
+msgstr ""
+
+#: cinder/network/ldapdns.py:321
+msgid "This driver only supports type 'a' entries."
+msgstr ""
+
+#: cinder/network/linux_net.py:166
+#, python-format
+msgid "Attempted to remove chain %s which does not exist"
+msgstr ""
+
+#: cinder/network/linux_net.py:192
+#, python-format
+msgid "Unknown chain: %r"
+msgstr ""
+
+#: cinder/network/linux_net.py:215
+#, python-format
+msgid ""
+"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
+"%(top)r"
+msgstr ""
+
+#: cinder/network/linux_net.py:335
+msgid "IPTablesManager.apply completed with success"
+msgstr ""
+
+#: cinder/network/linux_net.py:694
+#, python-format
+msgid "Hupping dnsmasq threw %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:696
+#, python-format
+msgid "Pid %d is stale, relaunching dnsmasq"
+msgstr ""
+
+#: cinder/network/linux_net.py:756
+#, python-format
+msgid "killing radvd threw %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:758
+#, python-format
+msgid "Pid %d is stale, relaunching radvd"
+msgstr ""
+
+#: cinder/network/linux_net.py:967
+#, python-format
+msgid "Starting VLAN interface %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:999
+#, python-format
+msgid "Starting Bridge interface for %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:1142
+#, python-format
+msgid "Starting bridge %s "
+msgstr ""
+
+#: cinder/network/linux_net.py:1149
+#, python-format
+msgid "Done starting bridge %s"
+msgstr ""
+
+#: cinder/network/linux_net.py:1167
+#, python-format
+msgid "Failed unplugging gateway interface '%s'"
+msgstr ""
+
+#: cinder/network/linux_net.py:1170
+#, python-format
+msgid "Unplugged gateway interface '%s'"
+msgstr ""
+
+#: cinder/network/manager.py:291
+#, python-format
+msgid "Fixed ip %(fixed_ip_id)s not found"
+msgstr ""
+
+#: cinder/network/manager.py:300 cinder/network/manager.py:496
+#, python-format
+msgid "Interface %(interface)s not found"
+msgstr ""
+
+#: cinder/network/manager.py:315
+#, python-format
+msgid "floating IP allocation for instance |%s|"
+msgstr ""
+
+#: cinder/network/manager.py:353
+#, python-format
+msgid "floating IP deallocation for instance |%s|"
+msgstr ""
+
+#: cinder/network/manager.py:386
+#, python-format
+msgid "Address |%(address)s| is not allocated"
+msgstr ""
+
+#: cinder/network/manager.py:390
+#, python-format
+msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
+msgstr ""
+
+#: cinder/network/manager.py:402
+#, python-format
+msgid "Quota exceeded for %s, tried to allocate address"
+msgstr ""
+
+#: cinder/network/manager.py:614
+#, python-format
+msgid ""
+"Database inconsistency: DNS domain |%s| is registered in the Cinder db but "
+"not visible to either the floating or instance DNS driver. It will be "
+"ignored."
+msgstr ""
+
+#: cinder/network/manager.py:660
+#, python-format
+msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
+msgstr ""
+
+#: cinder/network/manager.py:670
+#, python-format
+msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
+msgstr ""
+
+#: cinder/network/manager.py:778
+#, python-format
+msgid "Disassociated %s stale fixed ip(s)"
+msgstr ""
+
+#: cinder/network/manager.py:782
+msgid "setting network host"
+msgstr ""
+
+#: cinder/network/manager.py:896
+#, python-format
+msgid "network allocations for instance |%s|"
+msgstr ""
+
+#: cinder/network/manager.py:901
+#, python-format
+msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|"
+msgstr ""
+
+#: cinder/network/manager.py:930
+#, python-format
+msgid "network deallocation for instance |%s|"
+msgstr ""
+
+#: cinder/network/manager.py:1152
+#, python-format
+msgid ""
+"instance-dns-zone is |%(domain)s|, which is in availability zone "
+"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record"
+" will be created."
+msgstr ""
+
+#: cinder/network/manager.py:1227
+#, python-format
+msgid "Unable to release %s because vif doesn't exist."
+msgstr ""
+
+#: cinder/network/manager.py:1244
+#, python-format
+msgid "Leased IP |%(address)s|"
+msgstr ""
+
+#: cinder/network/manager.py:1248
+#, python-format
+msgid "IP %s leased that is not associated"
+msgstr ""
+
+#: cinder/network/manager.py:1256
+#, python-format
+msgid "IP |%s| leased that isn't allocated"
+msgstr ""
+
+#: cinder/network/manager.py:1261
+#, python-format
+msgid "Released IP |%(address)s|"
+msgstr ""
+
+#: cinder/network/manager.py:1265
+#, python-format
+msgid "IP %s released that is not associated"
+msgstr ""
+
+#: cinder/network/manager.py:1268
+#, python-format
+msgid "IP %s released that was not leased"
+msgstr ""
+
+#: cinder/network/manager.py:1331
+msgid "cidr already in use"
+msgstr ""
+
+#: cinder/network/manager.py:1334
+#, python-format
+msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
+msgstr ""
+
+#: cinder/network/manager.py:1345
+#, python-format
+msgid ""
+"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
+"(%(smaller)s)"
+msgstr ""
+
+#: cinder/network/manager.py:1404
+msgid "Network already exists!"
+msgstr ""
+
+#: cinder/network/manager.py:1423
+#, python-format
+msgid "Network must be disassociated from project %s before delete"
+msgstr ""
+
+#: cinder/network/manager.py:1832
+msgid ""
+"The sum of the number of networks and the vlan start cannot be "
+"greater than 4094"
+msgstr ""
+
+#: cinder/network/manager.py:1839
+#, python-format
+msgid ""
+"The network range is not big enough to fit %(num_networks)s. Network size"
+" is %(network_size)s"
+msgstr ""
+
+#: cinder/network/minidns.py:65
+msgid "This driver only supports type 'a'"
+msgstr ""
+
+#: cinder/network/quantum/client.py:154
+msgid "Tenant ID not set"
+msgstr ""
+
+#: cinder/network/quantum/client.py:180
+#, python-format
+msgid "Quantum Client Request: %(method)s %(action)s"
+msgstr ""
+
+#: cinder/network/quantum/client.py:196
+#, python-format
+msgid "Quantum entity not found: %s"
+msgstr ""
+
+#: cinder/network/quantum/client.py:206
+#, python-format
+msgid "Server %(status_code)s error: %(data)s"
+msgstr ""
+
+#: cinder/network/quantum/client.py:210
+#, python-format
+msgid "Unable to connect to server. Got error: %s"
+msgstr ""
+
+#: cinder/network/quantum/client.py:228
+#, python-format
+msgid "unable to deserialize object of type = '%s'"
+msgstr ""
+
+#: cinder/network/quantum/manager.py:162
+msgid "QuantumManager does not use 'multi_host' parameter."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:166
+msgid "QuantumManager requires that only one network is created per call"
+msgstr ""
+
+#: cinder/network/quantum/manager.py:176
+msgid "QuantumManager does not use 'vlan_start' parameter."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:182
+msgid "QuantumManager does not use 'vpn_start' parameter."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:186
+msgid "QuantumManager does not use 'bridge' parameter."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:190
+msgid "QuantumManager does not use 'bridge_interface' parameter."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:195
+msgid "QuantumManager requires a valid (.1) gateway address."
+msgstr ""
+
+#: cinder/network/quantum/manager.py:204
+#, python-format
+msgid ""
+"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with"
+" net-id '%(quantum_net_id)s'"
+msgstr ""
+
+#: cinder/network/quantum/manager.py:301
+#, python-format
+msgid "network allocations for instance %s"
+msgstr ""
+
+#: cinder/network/quantum/manager.py:588
+#, python-format
+msgid ""
+"port deallocation failed for instance: |%(instance_id)s|, port_id: "
+"|%(port_id)s|"
+msgstr ""
+
+#: cinder/network/quantum/manager.py:606
+#, python-format
+msgid ""
+"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: "
+"|%(vif_uuid)s|"
+msgstr ""
+
+#: cinder/network/quantum/melange_connection.py:96
+#, python-format
+msgid "Server returned error: %s"
+msgstr ""
+
+#: cinder/network/quantum/melange_connection.py:98
+msgid "Connection error contacting melange service, retrying"
+msgstr ""
+
+#: cinder/network/quantum/melange_connection.py:108
+#, python-format
+msgid ""
+"allocate IP on network |%(network_id)s| belonging to "
+"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac "
+"|%(mac_address)s| belonging to |%(project_id)s| "
+msgstr ""
+
+#: cinder/network/quantum/melange_ipam_lib.py:133
+msgid "get_project_and_global_net_ids must be called with a non-null project_id"
+msgstr ""
+
+#: cinder/network/quantum/cinder_ipam_lib.py:75
+msgid "Error creating network entry"
+msgstr ""
+
+#: cinder/network/quantum/cinder_ipam_lib.py:90
+#, python-format
+msgid "No network with net_id = %s"
+msgstr ""
+
+#: cinder/network/quantum/cinder_ipam_lib.py:221
+#, python-format
+msgid "No fixed IPs to deallocate for vif %s"
+msgstr ""
+
+#: cinder/network/quantum/quantum_connection.py:99
+#, python-format
+msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s"
+msgstr ""
+
+#: cinder/network/quantum/quantum_connection.py:113
+#, python-format
+msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s"
+msgstr ""
+
+#: cinder/notifier/api.py:115
+#, python-format
+msgid "%s not in valid priorities"
+msgstr ""
+
+#: cinder/notifier/api.py:130
+#, python-format
+msgid ""
+"Problem '%(e)s' attempting to send to notification system. "
+"Payload=%(payload)s"
+msgstr ""
+
+#: cinder/notifier/list_notifier.py:65
+#, python-format
+msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s."
+msgstr ""
+
+#: cinder/notifier/rabbit_notifier.py:46
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: cinder/rpc/amqp.py:146
+#, python-format
+msgid "Returning exception %s to caller"
+msgstr ""
+
+#: cinder/rpc/amqp.py:188
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: cinder/rpc/amqp.py:231
+#, python-format
+msgid "received %s"
+msgstr "natanggap %s"
+
+#: cinder/rpc/amqp.py:236
+#, python-format
+msgid "no method for message: %s"
+msgstr "walang paraan para sa mensahe: %s"
+
+#: cinder/rpc/amqp.py:237
+#, python-format
+msgid "No method for message: %s"
+msgstr "Walang paraan para sa mensahe: %s"
+
+#: cinder/rpc/amqp.py:321
+#, python-format
+msgid "Making asynchronous call on %s ..."
+msgstr ""
+
+#: cinder/rpc/amqp.py:324
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: cinder/rpc/amqp.py:346
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: cinder/rpc/amqp.py:354
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: cinder/rpc/amqp.py:379
+#, python-format
+msgid "Sending notification on %s..."
+msgstr ""
+
+#: cinder/rpc/common.py:54
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: cinder/rpc/common.py:71
+msgid "Timeout while waiting on RPC response."
+msgstr ""
+
+#: cinder/rpc/impl_kombu.py:111
+msgid "Failed to process message... skipping it."
+msgstr ""
+
+#: cinder/rpc/impl_kombu.py:407
+#, python-format
+msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d"
+msgstr ""
+
+#: cinder/rpc/impl_kombu.py:430
+#, python-format
+msgid "Connected to AMQP server on %(hostname)s:%(port)d"
+msgstr ""
+
+#: cinder/rpc/impl_kombu.py:466
+#, python-format
+msgid ""
+"Unable to connect to AMQP server on %(hostname)s:%(port)d after "
+"%(max_retries)d tries: %(err_str)s"
+msgstr ""
+
+#: cinder/rpc/impl_kombu.py:482
+#, python-format
+msgid ""
+"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying "
+"again in %(sleep_time)d seconds."
+msgstr ""
+
+#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385
+#, python-format
+msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s"
+msgstr ""
+
+#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400
+#, python-format
+msgid "Timed out waiting for RPC response: %s"
+msgstr ""
+
+#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404
+#, python-format
+msgid "Failed to consume message from queue: %s"
+msgstr ""
+
+#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434
+#, python-format
+msgid "Failed to publish message to topic '%(topic)s': %(err_str)s"
+msgstr ""
+
+#: cinder/rpc/impl_qpid.py:341
+#, python-format
+msgid "Unable to connect to AMQP server: %s "
+msgstr ""
+
+#: cinder/rpc/impl_qpid.py:346
+#, python-format
+msgid "Connected to AMQP server on %s"
+msgstr ""
+
+#: cinder/rpc/impl_qpid.py:354
+msgid "Re-established AMQP queues"
+msgstr ""
+
+#: cinder/rpc/impl_qpid.py:412
+msgid "Error processing message. Skipping it."
+msgstr ""
+
+#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91
+#: cinder/scheduler/simple.py:143
+msgid "Is the appropriate service running?"
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:58
+#, python-format
+msgid "Given data: %s"
+msgstr ""
+
+#: cinder/tests/test_volume_types.py:59
+#, python-format
+msgid "Result data: %s"
+msgstr ""
+
+#: cinder/tests/test_xenapi.py:626
+#, python-format
+msgid "Creating files in %s to simulate guest agent"
+msgstr ""
+
+#: cinder/tests/test_xenapi.py:637
+#, python-format
+msgid "Removing simulated guest agent files in %s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/test_servers.py:2144
+#, python-format
+msgid "Quota exceeded: code=%(code)s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57
+#, python-format
+msgid "_create: %s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66
+#, python-format
+msgid "_delete: %s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75
+#, python-format
+msgid "_get: %s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85
+#, python-format
+msgid "_get_all: %s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125
+#, python-format
+msgid "test_snapshot_create: param=%s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134
+#, python-format
+msgid "test_snapshot_create: resp_dict=%s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156
+#, python-format
+msgid "test_snapshot_create_force: param=%s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165
+#, python-format
+msgid "test_snapshot_create_force: resp_dict=%s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205
+#, python-format
+msgid "test_snapshot_show: resp=%s"
+msgstr ""
+
+#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231
+#, python-format
+msgid "test_snapshot_detail: resp_dict=%s"
+msgstr ""
+
+#: cinder/tests/integrated/test_login.py:31
+#, python-format
+msgid "flavor: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:38
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:47
+msgid "Authentication error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:55
+msgid "Authorization error"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:63
+msgid "Item not found"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:105
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:107
+#, python-format
+msgid "Body: %s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:125
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:151
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:161
+msgid "Unexpected status code"
+msgstr ""
+
+#: cinder/tests/integrated/api/client.py:168
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: cinder/tests/rpc/common.py:133
+#, python-format
+msgid "Nested received %(queue)s, %(value)s"
+msgstr ""
+
+#: cinder/tests/rpc/common.py:142
+#, python-format
+msgid "Nested return %s"
+msgstr ""
+
+#: cinder/tests/rpc/common.py:160
+msgid "RPC backend does not support timeouts"
+msgstr ""
+
+#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233
+#, python-format
+msgid "Received %s"
+msgstr ""
+
+#: cinder/virt/connection.py:85
+msgid "Failed to open connection to the hypervisor"
+msgstr ""
+
+#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396
+#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045
+#, python-format
+msgid "Compute_service record created for %s "
+msgstr ""
+
+#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399
+#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048
+#, python-format
+msgid "Compute_service record updated for %s "
+msgstr ""
+
+#: cinder/virt/firewall.py:130
+#, python-format
+msgid "Attempted to unfilter instance %s which is not filtered"
+msgstr ""
+
+#: cinder/virt/firewall.py:137
+#, python-format
+msgid "Filters added to instance %s"
+msgstr ""
+
+#: cinder/virt/firewall.py:139
+msgid "Provider Firewall Rules refreshed"
+msgstr ""
+
+#: cinder/virt/firewall.py:291
+#, python-format
+msgid "Adding security group rule: %r"
+msgstr ""
+
+#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87
+#, python-format
+msgid "Adding provider rule: %s"
+msgstr ""
+
+#: cinder/virt/images.py:86
+msgid "'qemu-img info' parsing failed."
+msgstr ""
+
+#: cinder/virt/images.py:92
+#, python-format
+msgid "fmt=%(fmt)s backed by: %(backing_file)s"
+msgstr ""
+
+#: cinder/virt/images.py:104
+#, python-format
+msgid "Converted to raw, but format is now %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:105
+msgid ""
+"Must specify vmwareapi_host_ip, vmwareapi_host_username and "
+"vmwareapi_host_password to use connection_type=vmwareapi"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:276
+#, python-format
+msgid "In vmwareapi:_create_session, got this exception: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:359
+#, python-format
+msgid "In vmwareapi:_call_method, got this exception: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:398
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: success"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:404
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:409
+#, python-format
+msgid "In vmwareapi:_poll_task, Got this error %s"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:140
+msgid ""
+"Must specify xenapi_connection_url, xenapi_connection_username "
+"(optionally), and xenapi_connection_password to use "
+"connection_type=xenapi"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472
+msgid "Could not determine iscsi initiator name"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:460
+msgid "Host startup on XenServer is not supported."
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:489
+msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:527
+msgid "Host is member of a pool, but DB says otherwise"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612
+#, python-format
+msgid "Got exception: %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:93
+msgid "No domains exist."
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:95
+#, python-format
+msgid "============= initial domains =========== : %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:99
+msgid "Building domain: to be removed"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:103
+msgid "Not running domain: remove"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:111
+msgid "domain running on an unknown node: discarded"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:127
+#, python-format
+msgid "No such domain (%s)"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:134
+#, python-format
+msgid "Failed to power down Bare-metal node %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:143
+msgid "deactivate -> activate fails"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:153
+msgid "destroy_domain: no such domain"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:154
+#, python-format
+msgid "No such domain %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:161
+#, python-format
+msgid "Domains: %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:163
+#, python-format
+msgid "Nodes: %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:166
+#, python-format
+msgid "After storing domains: %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:169
+msgid "deactivation/removing domain failed"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:176
+msgid "===== Domain is being created ====="
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:179
+msgid "Same domain name already exists"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:181
+msgid "create_domain: before get_idle_node"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:198
+#, python-format
+msgid "Created new domain: %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:213
+#, python-format
+msgid "Failed to boot Bare-metal node %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:222
+msgid "No such domain exists"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:226
+#, python-format
+msgid "change_domain_state: to new state %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:233
+#, python-format
+msgid "Stored fake domains to the file: %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:244
+msgid "domain does not exist"
+msgstr ""
+
+#: cinder/virt/baremetal/nodes.py:42
+#, python-format
+msgid "Unknown baremetal driver %(d)s"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:148
+#, python-format
+msgid "Error encountered when destroying instance '%(name)s': %(ex)s"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:162
+#, python-format
+msgid "instance %(instance_name)s: deleting instance files %(target)s"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:189
+#, python-format
+msgid "instance %s: rebooted"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:192
+msgid "_wait_for_reboot failed"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:222
+#, python-format
+msgid "instance %s: rescued"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:225
+msgid "_wait_for_rescue failed"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:242
+msgid "<============= spawn of baremetal =============>"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:255
+#, python-format
+msgid "instance %s: is building"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:260
+msgid "Key is injected but instance is not running yet"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:265
+#, python-format
+msgid "instance %s: booted"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:268
+#, python-format
+msgid "~~~~~~ current state = %s ~~~~~~"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:269
+#, python-format
+msgid "instance %s spawned successfully"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:272
+#, python-format
+msgid "instance %s: not booted"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:274
+msgid "Baremetal assignment is overcommitted."
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:354
+#, python-format
+msgid "instance %s: Creating image"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:473
+#, python-format
+msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:484
+#, python-format
+msgid ""
+"instance %(inst_name)s: ignoring error injecting data into image "
+"%(img_id)s (%(e)s)"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:529
+#, python-format
+msgid "instance %s: starting toXML method"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:531
+#, python-format
+msgid "instance %s: finished toXML method"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815
+msgid ""
+"Cannot get the number of cpu, because this function is not implemented "
+"for this platform. This error can be safely ignored for now."
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:714
+#, python-format
+msgid "#### RLK: cpu_arch = %s "
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:746
+msgid "Updating!"
+msgstr ""
+
+#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609
+#: cinder/virt/xenapi/host.py:129
+msgid "Updating host stats"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:185
+msgid "free_node...."
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:216
+#, python-format
+msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:221
+msgid "status of node is set to 0"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:232
+msgid "rootfs is already removed"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:264
+msgid "Before ping to the bare-metal node"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:275
+#, python-format
+msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:279
+#, python-format
+msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:292
+msgid "Nothing to do for tilera nodes: vmlinux is in CF"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:316
+msgid "activate_node"
+msgstr ""
+
+#: cinder/virt/baremetal/tilera.py:330
+msgid "Node is in an unknown error state."
+msgstr ""
+
+#: cinder/virt/disk/api.py:165
+msgid "no capable image handler configured"
+msgstr ""
+
+#: cinder/virt/disk/api.py:178
+#, python-format
+msgid "unknown disk image handler: %s"
+msgstr ""
+
+#: cinder/virt/disk/api.py:189
+msgid "image already mounted"
+msgstr ""
+
+#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64
+#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100
+#, python-format
+msgid "Failed to mount filesystem: %s"
+msgstr ""
+
+#: cinder/virt/disk/api.py:291
+#, python-format
+msgid "Failed to remove container: %s"
+msgstr ""
+
+#: cinder/virt/disk/api.py:441
+#, python-format
+msgid "User %(username)s not found in password file."
+msgstr ""
+
+#: cinder/virt/disk/api.py:457
+#, python-format
+msgid "User %(username)s not found in shadow file."
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +msgid "Instance soft rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:926
+#, python-format
+msgid "data: %(data)r, fpath: %(fpath)r"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:978
+msgid "Guest does not have a console available"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1020
+#, python-format
+msgid "Path '%(path)s' supports direct I/O"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1024
+#, python-format
+msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032
+#, python-format
+msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1153
+msgid "Creating image"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1339
+#, python-format
+msgid "Injecting %(injection)s into image %(img_id)s"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1349
+#, python-format
+msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1381
+#, python-format
+msgid "block_device_list %s"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1658
+msgid "Starting toXML method"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1662
+msgid "Finished toXML method"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1679
+#, python-format
+msgid ""
+"Error from libvirt while looking up %(instance_name)s: [Error Code "
+"%(error_code)s] %(ex)s"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1920
+msgid "libvirt version is too old (does not support getVersion)"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1942
+#, python-format
+msgid "'<cpu>' must be 1, but %d\n"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:1969
+#, python-format
+msgid "topology (%(topology)s) must have %(ks)s"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:2067
+#, python-format
+msgid ""
+"Instance launched has CPU info:\n"
+"%s"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:2079
+#, python-format
+msgid ""
+"CPU doesn't have compatibility.\n"
+"\n"
+"%(ret)s\n"
+"\n"
+"Refer to %(u)s"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:2136
+#, python-format
+msgid "Timeout migrating for %s. nwfilter not found."
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:2352
+#, python-format
+msgid "skipping %(path)s since it looks like volume"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:2407
+#, python-format
+msgid "Getting disk size of %(i_name)s: %(e)s"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:2458
+#, python-format
+msgid "Instance %s: Starting migrate_disk_and_power_off"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:2513
+msgid "During wait running, instance disappeared."
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:2518
+msgid "Instance running successfully."
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:2525
+#, python-format
+msgid "Instance %s: Starting finish_migration"
+msgstr ""
+
+#: cinder/virt/libvirt/connection.py:2565
+#, python-format
+msgid "Instance %s: Starting finish_revert_migration"
+msgstr ""
+
+#: cinder/virt/libvirt/firewall.py:42
+msgid ""
+"Libvirt module could not be loaded. NWFilterFirewall will not work "
+"correctly."
+msgstr ""
+
+#: cinder/virt/libvirt/firewall.py:93
+msgid "Called setup_basic_filtering in nwfilter"
+msgstr ""
+
+#: cinder/virt/libvirt/firewall.py:101
+msgid "Ensuring static filters"
+msgstr ""
+
+#: cinder/virt/libvirt/firewall.py:171
+#, python-format
+msgid "The nwfilter(%(instance_filter_name)s) is not found."
+msgstr ""
+
+#: cinder/virt/libvirt/firewall.py:217
+#, python-format
+msgid "The nwfilter(%(instance_filter_name)s) for %(name)s is not found."
+msgstr ""
+
+#: cinder/virt/libvirt/firewall.py:233
+msgid "iptables firewall: Setup Basic Filtering"
+msgstr ""
+
+#: cinder/virt/libvirt/firewall.py:252
+msgid "Attempted to unfilter instance which is not filtered"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:170
+#, python-format
+msgid "%s is a valid instance name"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:173
+#, python-format
+msgid "%s has a disk file"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:175
+#, python-format
+msgid "Instance %(instance)s is backed by %(backing)s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:186
+#, python-format
+msgid ""
+"Instance %(instance)s is using a backing file %(backing)s which does not "
+"appear in the image service"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:237
+#, python-format
+msgid "%(id)s (%(base_file)s): image verification failed"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:247
+#, python-format
+msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:266
+#, python-format
+msgid "Cannot remove %(base_file)s, it does not exist"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:278
+#, python-format
+msgid "Base file too young to remove: %s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:281
+#, python-format
+msgid "Removing base file: %s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:288
+#, python-format
+msgid "Failed to remove %(base_file)s, error was %(error)s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:299
+#, python-format
+msgid "%(id)s (%(base_file)s): checking"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:318
+#, python-format
+msgid ""
+"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d "
+"on other nodes"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:330
+#, python-format
+msgid ""
+"%(id)s (%(base_file)s): warning -- an absent base file is in use! "
+"instances: %(instance_list)s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:338
+#, python-format
+msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:348
+#, python-format
+msgid "%(id)s (%(base_file)s): image is not in use"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:354
+#, python-format
+msgid "%(id)s (%(base_file)s): image is in use"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:377
+#, python-format
+msgid "Skipping verification, no base directory at %s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:381
+msgid "Verify base images"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:388
+#, python-format
+msgid "Image id %(id)s yields fingerprint %(fingerprint)s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:406
+#, python-format
+msgid "Unknown base file: %s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:411
+#, python-format
+msgid "Active base files: %s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:414
+#, python-format
+msgid "Corrupt base files: %s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:418
+#, python-format
+msgid "Removable base files: %s"
+msgstr ""
+
+#: cinder/virt/libvirt/imagecache.py:426
+msgid "Verification complete"
+msgstr ""
+
+#: cinder/virt/libvirt/utils.py:264
+msgid "Unable to find an open port"
+msgstr ""
+
+#: cinder/virt/libvirt/vif.py:90
+#, python-format
+msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
+msgstr ""
+
+#: cinder/virt/libvirt/vif.py:99
+#, python-format
+msgid "Ensuring bridge %s"
+msgstr ""
+
+#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220
+#, python-format
+msgid "Failed while unplugging vif of instance '%s'"
+msgstr ""
+
+#: cinder/virt/libvirt/volume.py:163
+#, python-format
+msgid "iSCSI device not found at %s"
+msgstr ""
+
+#: cinder/virt/libvirt/volume.py:166
+#, python-format
+msgid ""
+"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. "
+"Try number: %(tries)s"
+msgstr ""
+
+#: cinder/virt/libvirt/volume.py:178
+#, python-format
+msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)"
+msgstr ""
+
+#: cinder/virt/vmwareapi/error_util.py:93
+#, python-format
+msgid "Error(s) %s occurred in the call to RetrieveProperties"
+msgstr ""
+
+#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77
+#, python-format
+msgid "%(text)s: _db_content => %(content)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/fake.py:131
+#, python-format
+msgid "Property %(attr)s not set for the managed object %(objName)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/fake.py:437
+msgid "There is no VM registered"
+msgstr ""
+
+#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609
+#, python-format
+msgid "Virtual Machine with ref %s is not there"
+msgstr ""
+
+#: cinder/virt/vmwareapi/fake.py:502
+#, python-format
+msgid "Logging out a session that is invalid or already logged out: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/fake.py:517
+msgid "Session is faulty"
+msgstr ""
+
+#: cinder/virt/vmwareapi/fake.py:520
+msgid "Session Invalid"
+msgstr ""
+
+#: cinder/virt/vmwareapi/fake.py:606
+msgid " No Virtual Machine has been registered yet"
+msgstr ""
+
+#: cinder/virt/vmwareapi/io_util.py:99
+#, python-format
+msgid "Glance image %s is in killed state"
+msgstr ""
+
+#: cinder/virt/vmwareapi/io_util.py:107
+#, python-format
+msgid "Glance image %(image_id)s is in unknown state - %(state)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/network_utils.py:128
+msgid ""
+"ESX SOAP server returned an empty port group for the host system in its "
+"response"
+msgstr ""
+
+#: cinder/virt/vmwareapi/network_utils.py:155
+#, python-format
+msgid "Creating Port Group with name %s on the ESX host"
+msgstr ""
+
+#: cinder/virt/vmwareapi/network_utils.py:169
+#, python-format
+msgid "Created Port Group with name %s on the ESX host"
+msgstr ""
+
+#: cinder/virt/vmwareapi/read_write_util.py:150
+#, python-format
+msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vim.py:84
+msgid "Unable to import suds."
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:488
+#, python-format
+msgid "Deleted temporary vmdk file %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:520
+msgid "instance is not powered on"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:527
+#, python-format
+msgid "Rebooting guest OS of VM %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:530
+#, python-format
+msgid "Rebooted guest OS of VM %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:532
+#, python-format
+msgid "Doing hard reboot of VM %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:536
+#, python-format
+msgid "Did hard reboot of VM %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:548
+#, python-format
+msgid "instance - %s not present"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:567
+#, python-format
+msgid "Powering off the VM %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:572
+#, python-format
+msgid "Powered off the VM %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:576
+#, python-format
+msgid "Unregistering the VM %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:579
+#, python-format
+msgid "Unregistered the VM %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:581
+#, python-format
+msgid ""
+"In vmwareapi:vmops:destroy, got this exception while un-registering the "
+"VM: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:592
+#, python-format
+msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:602
+#, python-format
+msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:607
+#, python-format
+msgid ""
+"In vmwareapi:vmops:destroy, got this exception while deleting the VM "
+"contents from the disk: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:615
+msgid "pause not supported for vmwareapi"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:619
+msgid "unpause not supported for vmwareapi"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:633
+#, python-format
+msgid "Suspending the VM %s "
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:637
+#, python-format
+msgid "Suspended the VM %s "
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:640
+msgid "instance is powered off and can not be suspended."
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:643
+#, python-format
+msgid "VM %s was already in suspended state. So returning without doing anything"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:656
+#, python-format
+msgid "Resuming the VM %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:661
+#, python-format
+msgid "Resumed the VM %s "
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:663
+msgid "instance is not in a suspended state"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:699
+msgid "get_diagnostics not implemented for vmwareapi"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:757
+#, python-format
+msgid ""
+"Reconfiguring VM instance %(name)s to set the machine id with ip - "
+"%(ip_addr)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:765
+#, python-format
+msgid ""
+"Reconfigured VM instance %(name)s to set the machine id with ip - "
+"%(ip_addr)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:802
+#, python-format
+msgid "Creating directory with path %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmops.py:806
+#, python-format
+msgid "Created directory with path %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmware_images.py:89
+#, python-format
+msgid "Downloading image %s from glance image server"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmware_images.py:103
+#, python-format
+msgid "Downloaded image %s from glance image server"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmware_images.py:108
+#, python-format
+msgid "Uploading image %s to the Glance image server"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmware_images.py:129
+#, python-format
+msgid "Uploaded image %s to the Glance image server"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmware_images.py:139
+#, python-format
+msgid "Getting image size for the image %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi/vmware_images.py:143
+#, python-format
+msgid "Got image size of %(size)s for the image %(image)s"
+msgstr ""
+
+#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652
+#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732
+msgid "Raising NotImplemented"
+msgstr ""
+
+#: cinder/virt/xenapi/fake.py:555
+#, python-format
+msgid "xenapi.fake does not have an implementation for %s"
+msgstr ""
+
+#: cinder/virt/xenapi/fake.py:589
+#, python-format
+msgid "Calling %(localname)s %(impl)s"
+msgstr ""
+
+#: cinder/virt/xenapi/fake.py:594
+#, python-format
+msgid "Calling getter %s"
+msgstr ""
+
+#: cinder/virt/xenapi/fake.py:654
+#, python-format
+msgid ""
+"xenapi.fake does not have an implementation for %s or it has been called "
+"with the wrong number of arguments"
+msgstr ""
+
+#: cinder/virt/xenapi/host.py:67
+#, python-format
+msgid ""
+"Instance %(name)s running on %(host)s could not be found in the database:"
+" assuming it is a worker VM and skipping migration to a new host"
+msgstr ""
+
+#: cinder/virt/xenapi/host.py:137
+#, python-format
+msgid "Unable to get SR for this host: %s"
+msgstr ""
+
+#: cinder/virt/xenapi/host.py:169
+msgid "Unable to get updated status"
+msgstr ""
+
+#: cinder/virt/xenapi/host.py:172
+#, python-format
+msgid "The call to %(method)s returned an error: %(e)s."
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of 
%(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +msgid "Starting instance" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: 
cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Muling kumonekta sa queue" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/netapp.py:614 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/netapp.py:620 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/netapp.py:625 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." 
+msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." 
+msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." 
+#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "response %s" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "" + +#~ msgid "message %s" +#~ msgstr "" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "" + +#~ msgid "Declaring queue %s" +#~ msgstr "" + +#~ msgid "Declaring exchange %s" +#~ msgstr "" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "" + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgstr "" + +#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgstr "" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "Volume status must be available" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/tr/LC_MESSAGES/nova.po b/cinder/locale/tr/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..e2212c4cea3 --- /dev/null +++ b/cinder/locale/tr/LC_MESSAGES/nova.po @@ -0,0 +1,8202 @@ +# Turkish translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2011-12-14 18:10+0000\n" +"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" +"Language-Team: Turkish <tr@li.org>\n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "" + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s."
+msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +msgid "Failed to terminate instance" +msgstr "" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." 
+msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." 
+msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." 
+msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." 
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be 
specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, python-format +msgid "Invalid mode: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance 
%s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +msgid "Going to try to soft delete instance" +msgstr "" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +msgid "Going to try to terminate instance" +msgstr "" + +#: cinder/compute/api.py:977 +msgid "Going to try to stop instance" +msgstr "" + +#: cinder/compute/api.py:996 +msgid "Going to try to start instance" +msgstr "" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume."
+msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started..." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migration started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s instances in the database and " +"%(num_vm_instances)s on the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait for the " +"next sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s."
+msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!"
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN interface %s" +msgstr "" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: cinder/network/linux_net.py:1142 +#, python-format +msgid "Starting bridge %s " +msgstr "" + +#: cinder/network/linux_net.py:1149 +#, python-format +msgid "Done starting bridge %s" +msgstr "" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum of the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s networks. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter."
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Cast '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Cast '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Cast '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Cast '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration cannot be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration cannot be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory (host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk (host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state."
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id: %s."
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: cinder/virt/connection.py:85 +msgid "Failed to 
open connection to the hypervisor" +msgstr "" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip, vmwareapi_host_username and " +"vmwareapi_host_password to use connection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is a member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist."
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed to power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully"
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s: not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Baremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of CPUs, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Nothing to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is in an unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file."
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +msgid "Instance soft rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +msgid "Guest does not have a console available" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of 
%(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +msgid "Starting instance" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: 
cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/netapp.py:614 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/netapp.py:620 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/netapp.py:625 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..."
+msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." 
+msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." 
+#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Reconnected to queue" +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "response %s" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "" + +#~ msgid "message %s" +#~ msgstr "" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "" + +#~ msgid "Declaring queue %s" +#~ msgstr "" + +#~ msgid "Declaring exchange %s" +#~ msgstr "" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "" + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgstr "" + +#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgstr "" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "Volume status must be available" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/uk/LC_MESSAGES/nova.po b/cinder/locale/uk/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..1af157a1ca1 --- /dev/null +++ b/cinder/locale/uk/LC_MESSAGES/nova.po @@ -0,0 +1,8199 @@ +# Ukrainian translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2011-08-23 11:21+0000\n" +"Last-Translator: Thierry Carrez \n" +"Language-Team: Ukrainian \n" +"Plural-Forms: nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && " +"n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Ім'я файлу секретного ключа" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "Шлях до збережених ключів" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "Неочікувана помилка при виконанні команди." + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "без порядку для повідомлень: %s" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." 
+msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +msgid "Failed to terminate instance" +msgstr "" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." 
+msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." 
+msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." 
+msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." 
+msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." 
+msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "Занадто багато невдалих аутентифікацій." + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, fuzzy, python-format +msgid "%s - This rule already exists in group" +msgstr "Це правило вже існує в групі %s" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "Вилучити групу безпеки %s" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "Створити розділ на %s ГБ" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "Від'єднати том %s" + +#: cinder/api/ec2/cloud.py:959 +#, fuzzy, python-format +msgid "Detach Volume Failed." 
+msgstr "Від'єднати том %s" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "лише група \"всі\" підтримується" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." 
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be 
specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, python-format +msgid "Invalid mode: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Це правило вже існує в групі %s" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance 
%s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +msgid "Going to try to soft delete instance" +msgstr "" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +msgid "Going to try to terminate instance" +msgstr "" + +#: cinder/compute/api.py:977 +msgid "Going to try to stop instance" +msgstr "" + +#: cinder/compute/api.py:996 +msgid "Going to try to start instance" +msgstr "" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." 
+msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." 
+msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: cinder/network/linux_net.py:1142 +#, python-format +msgid "Starting bridge %s " +msgstr "" + +#: cinder/network/linux_net.py:1149 +#, python-format +msgid "Done starting bridge %s" +msgstr "" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "отримано %s" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "немає методу для повідомлення: %s" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "Немає методу для повідомлення: %s" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID %s" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?"
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: cinder/virt/connection.py:85 +msgid "Failed to 
open connection to the hypervisor" +msgstr "" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "" + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "" + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." +msgstr "" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip, vmwareapi_host_username and " +"vmwareapi_host_password to use connection_type=vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, got this error %s" +msgstr "" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist."
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +msgid "Instance soft rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +msgid "Guest does not have a console available" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of 
%(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +msgid "Starting instance" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +msgid "Volume status must be available or error" +msgstr "" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +msgid "Volume Snapshot status must be available or error" +msgstr "" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: 
cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +#, fuzzy +msgid "Connected to DFM server" +msgstr "Оновлено з'єднання до черги" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, python-format +msgid "No LUN ID for volume %s" +msgstr "" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/netapp.py:614 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/netapp.py:620 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/netapp.py:625 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." 
+msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "відповідь %s" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." 
+msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." 
+#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "заголовок %s" + +#~ msgid "message %s" +#~ msgstr "повідомлення %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "" + +#~ msgid "Declaring queue %s" +#~ msgstr "Оголошення черги %s" + +#~ msgid "Declaring exchange %s" +#~ msgstr "Оголошення точки обміну %s" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "" + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgstr "" + +#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgstr "" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "Volume status must be available" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/locale/zh_CN/LC_MESSAGES/nova.po b/cinder/locale/zh_CN/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..165b7b5e155 --- /dev/null +++ b/cinder/locale/zh_CN/LC_MESSAGES/nova.po @@ -0,0 +1,8064 @@ +# Chinese (Simplified) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. 
+# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-04-03 23:36+0000\n" +"Last-Translator: cheesecake \n" +"Language-Team: Chinese (Simplified) \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "根证书的文件名" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "私钥文件名" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "根证书撤销列表的文件名" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "保存密钥的位置" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "保存根证书的位置" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" +msgstr "是否每个项目都使用认证权威?" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "用户证书的标题,%s 依次分别为项目,用户,时间戳" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "项目证书的标题,%s 依次分别为项目,时间戳" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "标记所在路径:%s" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "运行命令时出现意外错误" + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"命令:%(cmd)s\n" +"退出代码:%(exit_code)s\n" +"标准输出:%(stdout)r\n" +"标准错误输出:%(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "数据库异常被包裹。" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "发生未知异常。" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "解密文本失败" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "无法在镜像服务中翻页" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "虚拟接口创建失败" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "用唯一mac地址5次尝试创建虚拟接口失败" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "连接到glance失败" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "连接到melange失败" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "未授权。" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "用户没有管理员权限" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "政策不允许 %(action)s 被执行。" + +#: cinder/exception.py:216 +#, fuzzy, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "没有为镜像 %(image_id)s 找到内核。" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." 
+msgstr "无法接受的参数。" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "无效的快照" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "卷 %(volume_id)s 没有附加任何东西" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "密钥对数据无效" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "把数据加载为json格式失败" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "请求无效。" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "无效签名 %(signature)s 针对用户是 %(user)s" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "收到无效的输入" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "无效的实例类型 %(instance_type)s。" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "无效的卷类型" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "无效的卷" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "无效的端口范围 %(from_port)s:%(to_port)s. %(msg)s" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "无效的IP协议 %(protocol)s。" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "无效的内容类型 %(content_type)s。" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "无效的cidr %(cidr)s。" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "无效的RPC连接重用。" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." +msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "%(err)s" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "无法执行action '%(action)s' 于聚合 %(aggregate_id)s 上。原因: %(reason)s。" + +#: cinder/exception.py:301 +#, fuzzy, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "找不到有效主机,原因是 %(reason)s。" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "实例 %(instance_uuid)s 处于%(attr)s %(state)s 中。该实例在这种状态下不能执行 %(method)s。" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "实例 %(instance_id)s 没有运行。" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "实例 %(instance_id)s 没有挂起。" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "实例 %(instance_id)s 不在救援模式。" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "挂起实例失败" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "服务器恢复失败" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "重新启动实例失败" + +#: cinder/exception.py:334 +#, fuzzy +msgid "Failed to terminate instance" +msgstr "重新启动实例失败" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "该时刻服务无法使用。" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "该时刻卷服务无法使用。" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." 
+msgstr "该时刻计算服务无法使用。" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "无法把实例 (%(instance_id)s) 迁移到当前主机 (%(host)s)。" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "该时刻目标计算主机无法使用。" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "该时刻原始计算主机无法使用。" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "提供的虚拟机管理程序类型无效。" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "该实例需要比当前版本更新的虚拟机管理程序。" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "提供的磁盘路径 (%(path)s) 已经存在,预计是不存在的。" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "提供的设备路径 (%(path)s) 是无效的。" + +#: cinder/exception.py:381 +#, fuzzy, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "提供的设备路径 (%(path)s) 是无效的。" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "无法接受的CPU信息" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "%(address)s 不是有效的IP v4/6地址。" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "VLAN标签对于端口组%(bridge)s 是不适合的。预计的VLAN标签是 %(tag)s,但与端口组关联的是 %(pgroup)s。" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" +"包含端口组 %(bridge)s 的vSwitch没有与预计的物理适配器关联。预计的vSwitch是 %(expected)s,但关联的是 " +"%(actual)s。" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "磁盘格式 %(disk_format)s 不能接受" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "镜像 %(image_id)s 无法接受,原因是: %(reason)s" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "实例 %(instance_id)s 无法接受,原因是: %(reason)s" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." +msgstr "Ec2 id %(ec2_id)s 无法接受。" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "资源没有找到。" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "要求的标记 %(flag)s 没有设置。" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "卷 %(volume_id)s 没有找到。" + +#: cinder/exception.py:435 +#, fuzzy, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "无法找到帐户 %(account_name) on Solidfire 设备" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "没有为实例 %(instance_id)s 找到卷。" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "卷 %(volume_id)s 没有含键 %(metadata_key)s 的元数据。" + +#: cinder/exception.py:449 +msgid "Zero volume types found." 
+msgstr "没有找到卷类型。" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "卷类型 %(volume_type_id)s 没有找到。" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "名为 %(volume_type_name)s 的卷类型没有找到。" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "卷类型 %(volume_type_id)s 没有额外说明键 %(extra_specs_key)s 。" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "快照 %(snapshot_id)s 没有找到。" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "正在删除有快照的卷 %(volume_name)s" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "没有为卷 %(volume_id)s 找到目标id。" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "在 %(location)s 没有磁盘" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "无法为 %(driver_type)s 卷找到句柄。" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "无效的镜像href %(image_href)s。" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "有些镜像通过hrefs存储。该api版本不支持显示镜像hrefs。" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "镜像 %(image_id)s 没有找到。" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "没有为镜像 %(image_id)s 找到内核。" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "用户 %(user_id)s 没有找到。" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "项目 %(project_id)s 没有找到。" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "用户 %(user_id)s 不是项目 %(project_id)s 的成员。" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "角色 %(role_id)s 没有找到。" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "没有找到存储库来读写VDI。" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "创建网络 %(req)s 是必要的。" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "网络 %(network_id)s 没有找到。" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "无法为桥 %(bridge)s 找到网络" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "无法为uuid %(uuid)s 找到网络" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "无法为cidr %(cidr)s 找到网络。" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." +msgstr "无法为实例 %(instance_id)s 找到网络。" + +#: cinder/exception.py:553 +msgid "No networks defined." 
+msgstr "没有网络定义。" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "或者网络uuid %(network_uuid)s不存在,或者它没有分配给项目 %(project_id)s。" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "主机没有设置于网络 (%(network_id)s)。" + +#: cinder/exception.py:566 +#, fuzzy, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "网络 %s 存在活跃的端口,无法删除" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "无法找到虚拟机使用的数据存储引用。" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "没有固定IP与id %(id)s 关联。" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "没有为地址 %(address)s 找到固定IP。" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "实例 %(instance_id)s 没有固定ip。" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "网络主机 %(host)s 在网络 %(network_id)s 中没有固定ip。" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "实例 %(instance_id)s 没有固定ip '%(ip)s'。" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "主机 %(host)s 没有固定IP。" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "固定IP地址 (%(address)s) 在网络 (%(network_uuid)s) 中不存在。" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "固定IP地址 %(address)s 已在使用。" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "固定IP地址 %(address)s 无效。" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "没有固定ip可用。" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "找不到固定IP。" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "找不到适合id %(id)s 的浮动IP。" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "DNS 入口 %(name)s 已经在域中 %(domain)s 存在。" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "找不到适合地址 %(address)s 的浮动ip。" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "没有为主机 %(host)s 找到浮动IP。" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "没有浮动IP可用。" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "浮动ip %(address)s 已被关联。" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "浮动ip %(address)s 没有被关联。" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "没有浮动ip存在。" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "接口 %(interface)s没有找到。" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "密钥对 %(name)s 没有为用户 %(user_id)s 找到。" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." 
+msgstr "证书 %(certificate_id)s 没有找到。" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "服务 %(service_id)s 没有找到。" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "主机 %(host)s 没有找到。" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "计算主机 %(host)s 没有找到。" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "没有找到二进制 %(binary)s 在主机 %(host)s 上。" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." +msgstr "认证令牌 %(token)s 没有找到。" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "访问密钥 %(access_key)s 没有找到。" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "配额没有找到。" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "没有为项目 %(project_id)s 找到配额。" + +#: cinder/exception.py:696 +#, fuzzy, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "找不到类 %(class_name)s :异常 %(exception)s" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "安全组 %(security_group_id)s 没有找到。" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "没有找到安全组 %(security_group_id)s 针对项目 %(project_id)s 。" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "带有规则 %(rule_id)s 的安全组没有找到。" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "安全组 %(security_group_id)s 已经与实例 %(instance_id)s 关联。" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "安全组 %(security_group_id)s 没有与实例 %(instance_id)s 关联。" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "迁移 %(migration_id)s 没有找到。" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "没有为实例 %(instance_id)s 找到迁移其状态为 %(status)s 。" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "控制台池 %(pool_id)s 没有找到。" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "没有找到类型是 %(console_type)s 的控制台池针对计算主机 %(compute_host)s 在代理主机 %(host)s 上。" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "控制台%(console_id)s 没有找到。" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "没有为实例 %(instance_id)s 找到控制台。" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "没有为实例 %(instance_id)s 在池 %(pool_id)s 中找到控制台。" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "无效的控制台类型 %(console_type)s " + +#: cinder/exception.py:759 +msgid "Zero instance types found." 
+msgstr "没有找到实例类型。" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "实例类型 %(instance_type_id)s 没有找到。" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "名为 %(instance_type_name)s 的实例类型没有找到。" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "类型 %(flavor_id)s 没有找到。" + +#: cinder/exception.py:776 +#, fuzzy, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "区域 %(zone_id)s 没有找到。" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "调度器的成本函数 %(cost_fn_str)s 没有找到。" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "未找到调度器的权重标记:%(flag_name)s" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "实例 %(instance_id)s 没有键为 %(metadata_key)s 的元数据。" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "实例类型 %(instance_type_id)s 没有额外的 键为%(extra_specs_key)s 的规格说明。" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "未定义LDAP对象" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "没有找到LDAP用户 %(user_id)s。" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "没有找到LDAP用户组 %(group_id)s。" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "LDAP用户 %(user_id)s 不是 %(group_id)s 的组成员。" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "找不到文件 %(file_path)s。" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "没找到文件" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "未找到与网络适配器 %(adapter)s 关联的虚拟交换机。" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "未找到网络适配器 %(adapter)s。" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "找不到类 %(class_name)s :异常 %(exception)s" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "操作不允许。" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "无法使用全局角色 %(role_id)s" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "快照不允许循环。" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "备份 image_type 要求循环参数。" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "密钥对 %(key_name)s 已经存在。" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "用户 %(user)s 已存在。" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "LDAP用户 %(user)s 已经存在。" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." 
+msgstr "LDAP用户组 %(group)s 已经存在。" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "用户 %(uid)s 已经是 组 %(group_dn)s 中的成员" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "项目 %(project)s 已经存在。" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "实例 %(name)s 已经存在。" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "实例类型 %(name)s 已经存在。" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "卷类型 %(name)s 已经存在。" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "%(path)s 在共享存储上:%(reason)s" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "迁移错误" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "错误格式的消息体: %(reason)s" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "在 %(path)s 找不到配置文件。" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "无法从路径 %(path)s 中加载应用 '%(name)s'" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "调整时实例的大小必须要发生变化。" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "镜像比实例类型所允许的大。" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "1个或多个区域无法完成请求。" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "实例类型的内存对于所请求的镜像太小。" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "实例类型的磁盘对于所请求的镜像太小。" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "没有足够的可用内存来启动计算节点 %(uuid)s。" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." +msgstr "无法获取此主机的带宽、CPU和磁盘指标。" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "找不到有效主机,原因是 %(reason)s。" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "主机 %(host)s 没有启动或者不存在。" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "超出配额" + +#: cinder/exception.py:958 +#, fuzzy, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "聚合 %(aggregate_id)s没有主机 %(host)s。" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "找不到聚合 %(aggregate_id)s。" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "聚合 %(aggregate_name)s 已经存在。" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "聚合 %(aggregate_id)s没有主机 %(host)s。" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "聚合 %(aggregate_id)s 没有键为 %(metadata_key)s 的元数据。" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "主机 %(host)s 已经是另外一个聚合的成员。" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." 
+msgstr "聚合 %(aggregate_id)s已经有主机 %(host)s。" + +#: cinder/exception.py:988 +#, fuzzy, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "检测到不止一个名称为 %(vol_name) 的卷。" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "无法创建名称为 %(name)s 规格为 %(extra_specs)s 的卷类型。" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "无法创建实例类型。" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "来自SolidFire API的错误响应" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "SolidFire API响应里发生错误:status=%(status)s" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "SolidFire API响应里发生错误:data=%(data)s" + +#: cinder/exception.py:1013 +#, fuzzy, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "检测到已存在的id为%(vlan) vlan" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "实例 %(instance_id)s 没有找到。" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, fuzzy, python-format +msgid "Could not fetch image %(image)s" +msgstr "获取镜像 %(image)s" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "syslog设备必须作为一个 %s 。" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "跳过 %(full_task_name)s,到下次运行还剩下%(ticks_to_skip)s 跳。" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "正在运行周期性任务 %(full_task_name)s" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "在 %(full_task_name)s 期间发生的错误:%(e)s" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "向调度器通报能力。" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "JSON文件表示策略。" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "请求的规则找不到时的检查缺省规则。" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "启动 %(topic)s 节点 (版本 %(vcs_string)s)" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "为服务 %s 创建消费者" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "因无数据库记录,服务已被中止" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "服务数据库对象消失,正在重新创建。" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" +msgstr "与模型服务器(model server)的连接已恢复!" 
+
+#: cinder/service.py:340
+msgid "model server went away"
+msgstr "失去与模型服务器的连接"
+
+#: cinder/service.py:433
+msgid "Full set of FLAGS:"
+msgstr "标记全集:"
+
+#: cinder/service.py:440
+#, python-format
+msgid "%(flag)s : FLAG SET "
+msgstr "%(flag)s:标记已设置 "
+
+#: cinder/utils.py:79
+#, python-format
+msgid "Inner Exception: %s"
+msgstr "内层异常:%s"
+
+#: cinder/utils.py:165
+#, python-format
+msgid "Fetching %s"
+msgstr "正在抓取 %s"
+
+#: cinder/utils.py:210
+#, python-format
+msgid "Got unknown keyword args to utils.execute: %r"
+msgstr "发现未知的 utils.execute 关键字参数:%r"
+
+#: cinder/utils.py:220
+#, python-format
+msgid "Running cmd (subprocess): %s"
+msgstr "正在运行cmd (subprocess):%s"
+
+#: cinder/utils.py:236 cinder/utils.py:315
+#, python-format
+msgid "Result was %s"
+msgstr "运行结果为 %s"
+
+#: cinder/utils.py:249
+#, python-format
+msgid "%r failed. Retrying."
+msgstr "%r 失败,重试。"
+
+#: cinder/utils.py:291
+#, python-format
+msgid "Running cmd (SSH): %s"
+msgstr "运行cmd (SSH):%s"
+
+#: cinder/utils.py:293
+msgid "Environment not supported over SSH"
+msgstr "SSH上不支持环境变量"
+
+#: cinder/utils.py:297
+msgid "process_input not supported over SSH"
+msgstr "SSH上不支持process_input"
+
+#: cinder/utils.py:352
+#, python-format
+msgid "debug in callback: %s"
+msgstr "回调中debug:%s"
+
+#: cinder/utils.py:534
+#, python-format
+msgid "Link Local address is not found.:%s"
+msgstr "链路本地地址没有找到:%s"
+
+#: cinder/utils.py:537
+#, python-format
+msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
+msgstr "无法获取 %(interface)s 的链路本地IP:%(ex)s"
+
+#: cinder/utils.py:648
+#, python-format
+msgid "Invalid backend: %s"
+msgstr "无效的后端:%s"
+
+#: cinder/utils.py:659
+#, python-format
+msgid "backend %s"
+msgstr "后端 %s"
+
+#: cinder/utils.py:709
+msgid "in looping call"
+msgstr "循环调用中。"
+
+#: cinder/utils.py:927
+#, fuzzy, python-format
+msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "正在尝试为方法 \"%(method)s\" 获取信号量 \"%(lock)s\"..."
+
+#: cinder/utils.py:931
+#, fuzzy, python-format
+msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "已为方法 \"%(method)s\" 获得信号量 \"%(lock)s\"..."
+
+#: cinder/utils.py:935
+#, fuzzy, python-format
+msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "正在尝试为方法 \"%(method)s\" 获取文件锁 \"%(lock)s\"..."
+
+#: cinder/utils.py:942
+#, fuzzy, python-format
+msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "获得文件锁 \"%(lock)s\" 为方法 \"%(method)s\"...锁" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "期望的对象类型:%s" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "不正确的server_string:%s" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "timefunc:'%(name)s' 用了%(total_time).2f 秒" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "正在丢弃原来的异常。" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "类 %(fullname)s 是不推荐的:%(msg)s" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "类 %(fullname)s 是不推荐的" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "函数 %(name)s 在%(location)s 里的是不推荐的:%(msg)s" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "函数 %(name)s 在%(location)s 里的是不推荐的" + +#: cinder/utils.py:1681 +#, fuzzy, python-format +msgid "Could not remove tmpdir: %s" +msgstr "移除容器失败:%s" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "启动%(name)s 位置在 %(host)s:%(port)s" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "关闭WSGI服务器" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "正在停止裸TCP服务器。" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "正在启动TCP服务器 %(arg0)s 位置在%(host)s:%(port)s" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "WSGI服务器已经停止。" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "你必须执行 __call__" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "不可用" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "返回的non-serializeable类型:%s" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "%(code)s: %(message)s" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "FaultWrapper: %s" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "认证失败过多" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "访问密钥 %(access_key)s 有错误 %(failures)d,认证失败将被锁定 %(lock_mins)d 分钟。" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "签名没有提供" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "访问密钥没有提供" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "与keystone交流失败" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "认证失败:%s" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "为%(uname)s:%(pname)s 验证通过的请求" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "执行: %s" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "arg: %(key)s\t\tval: %(value)s" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "为controller=%(controller)s 以及 action=%(action)s未验证的请求" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "引起异常 InstanceNotFound: %s" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "引起异常 VolumeNotFound: %s" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "引起异常 SnapshotNotFound: %s" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "引起异常 NotFound: %s" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "引起异常 EC2APIError: %s" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "引起异常 KeyPairExists: %s" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "引起异常 InvalidParameterValue: %s" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "引起异常 InvalidPortRange: %s" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "引起异常 NotAuthorized: %s" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "引起异常 InvalidRequest: %s" + +#: cinder/api/ec2/__init__.py:633 +#, fuzzy, python-format +msgid "QuotaError raised: %s" +msgstr "引起意外的错误:%s" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "引起意外的错误:%s" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "Environment: %s" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "发生了一个未知的错误. 请重试你的请求." + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "不支持的API请求:controller = %(controller)s,action = %(action)s" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "创建卷 %s 的快照" + +#: cinder/api/ec2/cloud.py:372 +#, fuzzy, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." 
+msgstr "参数GroupName的值 (%s) 无效。内容仅限于含有字母数字的字符,空格,破折号和下划线。" + +#: cinder/api/ec2/cloud.py:378 +#, fuzzy, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." +msgstr "参数GroupName的值 (%s) 无效。长度超过了上限255。" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "创建密钥对 %s" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "导入密钥 %s" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "删除密钥对 %s" + +#: cinder/api/ec2/cloud.py:551 +#, fuzzy +msgid "Invalid CIDR" +msgstr "无效的" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "撤销 %s 安全组入口权限" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, fuzzy, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "参数不够创建有效规则。" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "对给定的参数无特定规则。" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "授权 %s 安全组入口权限" + +#: cinder/api/ec2/cloud.py:725 +#, fuzzy, python-format +msgid "%s - This rule already exists in group" +msgstr "这条规则已经存在于组%s 中" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "参数GroupName的值 (%s) 无效。内容仅限于含有字母数字的字符,空格,破折号和下划线。" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "参数GroupName的值 (%s) 无效。长度超过了上限255。" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "创建安全组 %s" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "安全组 %s 已经存在" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "删除安全组 %s" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "获取实例 %s 控制台输出" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "从快照 %s 创建卷" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "创建 %s GB的卷" + +#: cinder/api/ec2/cloud.py:921 +#, fuzzy +msgid "Delete Failed" +msgstr "创建失败" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "把卷 %(volume_id)s 附加到实例 %(instance_id)s 上位置在 %(device)s" + +#: cinder/api/ec2/cloud.py:939 +#, fuzzy +msgid "Attach Failed." +msgstr "创建失败" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "分离卷 %s" + +#: cinder/api/ec2/cloud.py:959 +#, fuzzy, python-format +msgid "Detach Volume Failed." 
+msgstr "分离卷 %s" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "属性不支持: %s" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "vol = %s\n" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "分配地址" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "释放地址 %s" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "把地址 %(public_ip)s 关联到实例 %(instance_id)s" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "取消地址 %s 的关联" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "镜像必须可用。" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "准备开始终止实例" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "重启实例 %r" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "准备停止实例" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "准备启动实例" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "解除镜像 %s 的注册" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "用id %(image_id)s 注册镜像 %(image_location)s" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "用户或者组没有确定" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "仅仅支持组\"all\"" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "operation_type必须添加或者移除" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "正在更新镜像 %s 的 publicity 属性" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "无法在 %d 秒内停止实例" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "抓到错误:%s" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s 随HTTP %(status)d返回" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "必须明确一个ExtensionManager类" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "扩展资源:%s" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "扩展%(ext_name)s:无法扩展资源 %(collection)s:没有那种资源" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "扩展资源的扩展 %(ext_name)s:%(collection)s" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "%(user_id)s 通过令牌 '%(token)s' 是找不到的" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid 
"%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "%(user_id)s 必须是 %(project_id)s 的管理员或者成员" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "认证请求必须针对root版本(例如 /v2)。" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." +msgstr "请求中找不到 %s" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "成功验证 '%s'" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "没有为提供的API密钥找到用户。" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "提供的API密钥有效,但并不是给用户 '%(username)s' 的。" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "limit 参数必须是整数" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "limit参数必须是正数" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "offset 参数必须是整数" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "offset 参数必须是正数" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "没有找到标记 [%s]" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "href %s 不包含版本" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "超过镜像元数据限制" + +#: cinder/api/openstack/common.py:295 +#, fuzzy, python-format +msgid "Converting nw_info: %s" +msgstr "实例的network_info:|%s|" + +#: cinder/api/openstack/common.py:305 +#, fuzzy, python-format +msgid "Converted networks: %s" +msgstr "意外错误:%s" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "'%(action)s' 针对处于 %(attr)s %(state)s 的实例是无法进行" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "实例针对 '%(action)s' 处于无效状态" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "拒绝快照请求,快照当前未被激活" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "该时刻实例快照是不允许的。" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "加载的扩展:%s" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "Ext name: %s" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "Ext alias: %s" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "Ext 描述: %s" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "Ext 命名空间: %s" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "Ext updated: %s" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "加载扩展发生异常:%s" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "正在加载扩展 %s" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "调用扩展工厂 %s" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "加载扩展 %(ext_factory)s 失败:%(exc)s" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "加载扩展 %(classpath)s 失败:%(exc)s" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "加载扩展 %(ext_name)s 失败:%(exc)s" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "无法理解JSON" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "无法理解XML" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "过多主体密钥" + +#: cinder/api/openstack/wsgi.py:582 +#, fuzzy, python-format +msgid "Exception handling resource: %s" +msgstr "扩展资源:%s" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "错误抛出: %s" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP 异常抛出:%s" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "请求中提供了无法识别的 Content-Type" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "请求中没有提供 Content-Type" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "请求中没有提供主体" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "没有该动作:%s" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "错误格式的请求主体" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "不支持的Content-Type" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "错误格式的请求url" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "%(url)s返回错误:%(e)s" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr 
"元素不是子节点" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "根元素选择列表" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "模板数不匹配;把slave %(slavetag)s 添加到master %(mastertag)s" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" +msgstr "subclasses必须执行construct()!" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "正在初始化扩展管理员。" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "镜像没有找到。" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "不正确的请求主体格式" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "请求主体和URI不匹配" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "请求主体包含太多items" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "无效的元数据键" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "实例不存在" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "实例并不是指定网络的成员" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "只能有 %(value)s 个 %(verb)s 请求发送给 %(uri)s 限定是每一个 %(unit_string)s。" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "这个请求受到频率限制。" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "服务器不存在" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "元数据项目未找到" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "无效的服务器状态:%(status)s" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "无效的changes-since值" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "超过个性化文件限制" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "个性化文件路径太长" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "个性化文件内容太长" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "服务器名称不是字符串或者unicode" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "服务器名称是空串" + +#: cinder/api/openstack/compute/servers.py:509 +#, fuzzy +msgid "Server name must be less than 256 characters." 
+msgstr "密钥对名称长度必须在1到255个字符之间" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "错误的个性化格式:丢失 %s" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "损坏的个性化格式" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "个性化 %s 的内容无法解码" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "损坏的网络格式:网络 uuid 格式不正确 (%s)" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "不正确的固定 IP 地址(%s)" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "不允许重复的网络(%s)" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "错误的网络格式:丢失%s" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "错误的网络格式" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "Userdata 内容无法解码" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "accessIPv4 不是正确的IPv4格式" + +#: cinder/api/openstack/compute/servers.py:601 +#, fuzzy +msgid "accessIPv6 is not proper IPv6 format" +msgstr "accessIPv4 不是正确的IPv4格式" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "服务器名称未定义" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "提供了无效的flavorRef。" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "无法找到请求的镜像" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "提供了无效的key_name。" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "实例还没有调整大小。" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "confirm-resize中的错误 %s" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "revert-resize中的错误 %s" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "重启的参数'type'既不是HARD也不是SOFT" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "缺少重启的参数'type'" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "重启中错误 %s" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "无法找到请求的类型。" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "调整大小需要尺寸的改变。" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "错误格式的服务器实体" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "缺少属性imageRef" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." 
+msgstr "提供了无效的imageRef。" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "缺少属性flavorRef" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "没有确定adminPass" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "无效的adminPass" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "无法解析元数据键/值对" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." +msgstr "调整大小请求中的属性'flavorRef'无效。" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "调整大小请求要求有属性'flavorRef'。" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "无效的请求主体" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "无法解析请求中的imageRef。" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "无法找到实例" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "无法找到用来重新创建的镜像" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "实体createImage需要属性name" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "无效的元数据" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "正在从查询语句中移除选项 '%(unk_opt_str)s'" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::暂停 %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::取消暂停 %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::挂起 %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "迁移错误 %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "没有找到服务器" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "Compute.api::inject_network_info %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "compute.api::加锁 %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "compute.api::解锁 %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, 
python-format +msgid "createBackup entity requires %s attribute" +msgstr "实体createBackup需要有属性 %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "错误格式的实体createBackup" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "createBackup的属性 'rotation'必须是整数" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "没有找到实例" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid "host and block_migration must be specified." +msgstr "host与block_migration必须确定" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "实例 %(id)s 到主机 %(host)s 的动态迁移失败" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, fuzzy, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "无法创建名称为 %(name)s 规格为 %(extra_specs)s 的卷类型。" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, fuzzy, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "更新代理失败:%(resp)r" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "聚合没有动作 %s" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "只有根证书能被获取。" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "无法为VPN实例申请IP,确保实例没有运行,过一会儿再试。" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "缺少类型规范" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "无效的类型规范" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "%s 必须是'MANUAL' 或者 'AUTO'。" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." 
+msgstr "没有找到服务器。" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +#, fuzzy +msgid "Flavor not found." +msgstr "没有找到服务器。" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "没有请求主体" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "池 %s 中已经没有浮动ip。" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." +msgstr "没有更多的浮动ip。" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "缺少参数 dict" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "地址没有指定" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "没有固定ip与实例关联" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "关联浮动ip失败" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "无效的状态:'%s'" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, fuzzy, python-format +msgid "Invalid mode: '%s'" +msgstr "无效的状态:'%s'" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "无效的更新设置:'%s'" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, fuzzy, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "把主机 %(host)s 设置为 %(state)s。" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "把主机 %(host)s 设置为 %(state)s。" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "Describe-resource是只有管理员才能执行的功能。" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "没有找到主机" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "密钥对名称长度必须在1到255个字符之间" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "密钥对 '%s' 已经存在。" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "addFixedIp缺少参数'networkId'" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "removeFixedIp缺少参数'address'" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "无法找到地址 %r" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "网络不包含动作 %s" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "为id是 %s 的网络解除关联" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "没有找到网络" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "显示id是 %s 的网络" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "正在删除id是 %s 的网络" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "错误格式的 scheduler_hints 属性" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "安全组id应该是整数" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +#, fuzzy +msgid "Security group is still in use" +msgstr "安全组id应该是整数" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "安全组 %s 已经存在" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "安全组 %s 既不是字符串也不是unicode" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "安全组 %s 不能为空。" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "安全组 %s 不能比255个字符更长。" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "父组id不是整数" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "没有找到安全组 (%s)" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "参数不够创建有效规则。" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "这条规则已经存在于组%s 中" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "父组id或者组id不是整数" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "规则id不是整数" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "没有找到规则 (%s)" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "没有指定安全组" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "安全组名称不能是空" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "启动实例 %r" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "停止实例 %r" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "vol=%s" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "删除id为 %s 的卷" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "把卷 %(volume_id)s 附加到实例 %(server_id)s 的 %(device)s 设备上" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "删除id为 %s 的快照" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "为卷 %s 创建快照" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "试图实例化单例" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "尝试删除组中最后一个成员,用删除组 %s 来代替。" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "查找用户:%r" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "访问密钥 %s 授权失败" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "使用用户名称来作为项目名称 (%s)" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "授权失败:没有此项目名称 %(pjid)s (user=%(uname)s)" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "授权失败:用户 %(uname)s 不是管理员,也不是项目 %(pjname)s 的成员" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "user.secret: %s" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "expected_signature: %s" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "签名: %s" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "用户 %s 的签名无效" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "host_only_signature: %s" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "必须指定项目" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "为用户 %(uid)s 添加角色 %(role)s 用户位置在%(pid)s 项目里" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "添加全局角色 %(role)s 给用户 %(uid)s" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "移除用户 %(uid)s 的角色 %(role)s 用户位置是在项目 %(pid)s 里" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "移除全局角色 %(role)s 对象是用户 %(uid)s" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "创建项目 %(name)s 通过管理员 %(manager_user)s" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "正在修改项目 %s" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "添加用户 %(uid)s 到项目%(pid)s" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "移除用户 %(uid)s 于项目 %(pid)s 中" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "删除项目 %s" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "已创建用户 %(rvname)s (admin: %(rvadmin)r)" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "删除用户 %s" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "修改用户 %s 的访问密钥" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "修改用户 %s 的私钥" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "将管理员状态设置为 %(admin)r 目的是针对用户 %(uid)s" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "没有 %s 项目的vpn数据" + +#: cinder/cloudpipe/pipelib.py:46 +#, fuzzy, 
+msgid "Instance type for vpn instances"
+msgstr "无效的实例类型 %(instance_type)s。"
+
+#: cinder/cloudpipe/pipelib.py:49
+msgid "Template for cloudpipe instance boot script"
+msgstr "cloudpipe实例的启动脚本模板"
+
+#: cinder/cloudpipe/pipelib.py:52
+msgid "Network to push into openvpn config"
+msgstr "要推送到openvpn配置中的网络"
+
+#: cinder/cloudpipe/pipelib.py:55
+msgid "Netmask to push into openvpn config"
+msgstr "要推送到openvpn配置中的子网掩码"
+
+#: cinder/cloudpipe/pipelib.py:107
+#, python-format
+msgid "Launching VPN for %s"
+msgstr "正在为 %s 启动VPN"
+
+#: cinder/compute/api.py:141
+msgid "No compute host specified"
+msgstr "未指定计算宿主机"
+
+#: cinder/compute/api.py:144
+#, python-format
+msgid "Unable to find host for Instance %s"
+msgstr "无法找到实例 %s 的宿主机"
+
+#: cinder/compute/api.py:192
+#, python-format
+msgid ""
+"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
+"properties"
+msgstr "%(pid)s 已经超过配额,试图设置 %(num_metadata)s 个元数据属性"
+
+#: cinder/compute/api.py:203
+#, python-format
+msgid "Quota exceeded for %(pid)s, metadata property key or value too long"
+msgstr "%(pid)s 已经超过配额,元数据属性键或值太长"
+
+#: cinder/compute/api.py:257
+#, fuzzy
+msgid "Cannot run any more instances of this type."
+msgstr "超过实例的配额。您无法运行更多此类型的实例。"
+
+#: cinder/compute/api.py:259
+#, fuzzy, python-format
+msgid "Can only run %s more instances of this type."
+msgstr "超过实例的配额。您只能再运行 %s 个此类型的实例。"
+
+#: cinder/compute/api.py:261
+#, fuzzy, python-format
+msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. "
+msgstr "%(pid)s 已经超过配额,试图运行 %(min_count)s 个实例"
+
+#: cinder/compute/api.py:310
+msgid "Creating a raw instance"
+msgstr "正在创建裸实例"
+
+#: cinder/compute/api.py:312
+#, python-format
+msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s"
+msgstr "使用 Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s"
+
+#: cinder/compute/api.py:383
+#, python-format
+msgid "Going to run %s instances..."
+msgstr "准备运行 %s 个实例..."
+
+#: cinder/compute/api.py:447
+#, python-format
+msgid "bdm %s"
+msgstr "bdm %s"
+
+#: cinder/compute/api.py:474
+#, python-format
+msgid "block_device_mapping %s"
+msgstr "block_device_mapping %s"
+
+#: cinder/compute/api.py:591
+#, python-format
+msgid "Sending create to scheduler for %(pid)s/%(uid)s's"
+msgstr "正在为 %(pid)s/%(uid)s 向调度器发送创建请求"
+
+#: cinder/compute/api.py:871
+#, fuzzy, python-format
+msgid "Going to try to soft delete instance"
+msgstr "准备尝试软删除实例 %s"
+
+#: cinder/compute/api.py:891
+#, fuzzy, python-format
+msgid "No host for instance, deleting immediately"
+msgstr "没有托管实例 %s,立刻删除"
+
+#: cinder/compute/api.py:939
+#, fuzzy, python-format
+msgid "Going to try to terminate instance"
+msgstr "准备尝试终止实例 %s"
+
+#: cinder/compute/api.py:977
+#, fuzzy, python-format
+msgid "Going to try to stop instance"
+msgstr "准备尝试停止实例 %s"
+
+#: cinder/compute/api.py:996
+#, fuzzy, python-format
+msgid "Going to try to start instance"
+msgstr "准备尝试启动实例 %s"
+
+#: cinder/compute/api.py:1000
+#, python-format
+msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s"
+msgstr "实例 %(instance_uuid)s 没有停止。(%(vm_state)s"
+
+#: cinder/compute/api.py:1071 cinder/volume/api.py:173
+#: cinder/volume/volume_types.py:64
+#, python-format
+msgid "Searching by: %s"
+msgstr "搜索条件: %s"
+
+#: cinder/compute/api.py:1201
+#, python-format
+msgid "Image type not recognized %s"
+msgstr "无法识别镜像类型 %s"
+
+#: cinder/compute/api.py:1369
+msgid "flavor_id is None. Assuming migration."
+msgstr "flavor_id为空。假定在迁移。" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "旧的实例类型 %(current_instance_type_name)s,新的实例类型 %(new_instance_type_name)s" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "存在多个固定IP,使用第一个:%s" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "创建参数必须是正整数" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "数据库错误:%s" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "未找到要删除的实例类型 %s" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: 锁定: |%s|" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: 执行中: |%s|" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: 未执行 |%s|" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "无法加载虚拟驱动:%s" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "当前状态是 %(drv_state)s,数据库状态是 %(db_state)s。" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "cinder-compute重启后,实例正在重启。" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "虚拟机管理程序驱动不支持防火墙规则" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "正在检查状态" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "正在设置 bdm %s" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, fuzzy, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "终止实例 %(instance_uuid)s 时发生异常" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." 
+msgstr "未找到实例 %s" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "实例已经创建" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" +"镜像 '%(image_id)s' 的大小为 %(size_bytes)d ,超过实例类型 instance_type 所允许的大小 " +"%(allowed_size_bytes)d" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." +msgstr "正在启动虚拟机" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "实例跳过网络分配" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "实例网络设置失败" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "实例的network_info:|%s|" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "实例块设备设置失败" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "实例生产失败" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "释放实例的网络" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "%(action_str)s 实例" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "忽略异常 DiskNotFound: %s" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "终止bdm %s" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "%s。把实例的 vm_state设置为ERROR" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." 
+msgstr "无法重建实例 [%(instance_uuid)s],因为给定的镜像不存在。" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "无法重新创建实例 [%(instance_uuid)s]: %(exc)s" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "正在重新创建实例 %s" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "正在重启虚拟机 %s" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "试图重启一个没有运行的实例:%(instance_uuid)s (状态:%(state)s 预计:%(running)s)" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "实例 %s: 快照中" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "试图为一个没有运行的实例快照:%(instance_uuid)s (状态:%(state)s 预计:%(running)s)" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "找到 %(num_images)d 个镜像 (rotation: %(rotation)d)" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "轮换出%d个备份" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "正在删除镜像 %s" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "设置管理员密码失败。实例 %s 没有运行" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "实例 %s:Root密码已设置" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." +msgstr "该驱动不能执行set_admin_password。" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "设置管理员密码出错" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" +"试图把一个文件注入到没有运行的实例:%(instance_uuid)s (状态: %(current_power_state)s 预计: " +"%(expected_state)s)" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "实例 %(instance_uuid)s:把文件注入 %(path)s" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" +"试图更新没有运行的实例上的代理:%(instance_uuid)s (状态: %(current_power_state)s 预计: " +"%(expected_state)s)" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "实例 %(instance_uuid)s:正在把代理更新到 %(url)s" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "实例 %s:正在救援中" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "实例 %s:取消救援" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" 
+msgstr "目标与来源一样。" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "实例 %s:正在迁移" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "实例 %s: 暂停" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "实例 %s: 取消暫停" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "实例 %s :获取诊断" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "实例 %s:挂起" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "实例 %s: 恢复中" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "实例%s:锁定中" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "实例%s:取消锁定" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "实例%s:获取锁定的状态" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "实例%s:重置网络" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "实例 %s:注入网络信息" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "将注入的network_info:|%s|" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "实例 %s:正在获得VNC控制台" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "卷 %(volume_id)s 正在 %(mountpoint)s 上启动" + +#: cinder/compute/manager.py:1703 +#, fuzzy, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "正在把卷 %(volume_id)s 附加到 %(mountpoint)s" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "正在把卷 %(volume_id)s 附加到 %(mountpoint)s" + +#: cinder/compute/manager.py:1714 +#, fuzzy, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "%(mountpoint)s 附加失败,移除中" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "%(mountpoint)s 附加失败,移除中" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "卷 %(volume_id)s 从挂载点 %(mp)s 分离" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "从未知实例%s中分离卷" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "创建tmpfile %s 来通知其他的计算节点需要挂载相同的存储。" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." +msgstr "实例没有卷。" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "plug_vifs() 失败%(cnt)d 次。最多重新尝试 %(max_retry)d 次在主机 %(hostname)s。" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "预在线迁移在%(dest)s失败" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "post_live_migration()已经启动。" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "没有找到floating_ip" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." 
+msgstr "没有找到floating_ip" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" +"在线迁移:意外的错误:无法继承浮动ip。\n" +"%(e)s" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "把实例迁移到 %(dest)s 成功完成。" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" +"你会看到错误“libvirt: QEMU error: Domain not found: no domain with matching " +"name。”这个错误可以放心的忽略。" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "迁移后操作启动" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "更新带宽使用缓存" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "更新主机状态" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "在数据库中找到 %(num_db_instances)s个实例,在虚拟机管理程序找到 %(num_vm_instances)s 个实例。" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +#, fuzzy, python-format +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "数据库中发现实例 %(name)s ,但是虚拟机管理程序不知道。设置加电状态为NOSTATE" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "FLAGS.reclaim_instance_interval <= 0,跳过..." + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "回收删除的实例" + +#: cinder/compute/manager.py:2458 +#, fuzzy, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "检测标签名为 '%(name_label)s' 的实例,这些实例被标识为DELETED却仍然存在于主机上。" + +#: cinder/compute/manager.py:2465 +#, fuzzy, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "销毁标签名为 '%(name_label)s' 的实例,这些实例被标识为DELETED却仍然存在于主机上。" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "无法识别的FLAGS.running_deleted_instance_action的取值 '%(action)s'" + +#: cinder/compute/manager.py:2542 +#, fuzzy, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "聚合 %(aggregate_id)s已经有主机 %(host)s。" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "遗留的网络信息 nw_info 要求使用 IPv4 子网" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "正在添加控制台" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "尝试删除不存在的控制台%(console_id)s。" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." 
+msgstr "尝试删除不存在的控制台%(console_id)s。" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." +msgstr "删除控制台%(console_id)s。" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "重建xvp配置" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "重写%s" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "停止xvp" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "正在启动xvp" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "启动xvp发生错误:%s" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "重启xvp" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "xvp不在运行中" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "删除过期令牌:(%s)" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "接收到令牌:%(token)s, %(token_dict)s)" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "检查令牌:%(token)s, %(token_valid)s)" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "使用空的请求上下文是不推荐的" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "无法识别的 read_deleted 取值”%s“" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "主机 %(host)s 没有计算节点" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "没有id为%(sm_backend_id)s的后台配置" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "没有 sm_flavor 调用 %(sm_flavor)s" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "没有id为 %(volume_id)s 的 sm_volume" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "没有安装 python-migrate。正退出。" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "version应该是整数" + +#: cinder/db/sqlalchemy/session.py:137 +#, fuzzy, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "SQL连接失败 (%(connstring)s)。还剩 %(attempts)d 次。" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "interface 列没有加入networks 表中" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "表 |%s| 没有创建" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "VIF 列没有加入到 fixed_ips 表中" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "为移动 mac_addresses |%s| 加入列表" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "外键约束无法添加" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "外键约束无法删除" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "priority列没有加入到 networks 表中" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "外键约束无法去除" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "progress列没有加入到实例表中" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "无法把 flavorid 转化为整数:%s。设置 flavorid 成类似整数的字符串来降级。" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "instance_info_caches 没有删除掉" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "progress 列没有加入到 compute_nodes 表中" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "dns_domains 表没有删除" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +#, fuzzy +msgid "quota_classes table not dropped" +msgstr "instance_info_caches 没有删除掉" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "glance服务器连接错误,重试中" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "达到最大尝试次数" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "正在Glance中创建镜像。元数据 %s 已经传入。" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "为Glance 进行格式化后的元数据 %s" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "从Glance返回的为Base格式化的元数据 %s" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "不是镜像所有者" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "%(timestamp)s 没有遵循任何签名格式:%(iso_formats)s" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "把 %(image_location)s 下载到 %(image_path)s失败" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "解密 %(image_location)s 到 %(image_path)s失败" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "解包 %(image_location)s 到 %(image_path)s 失败" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "上传 %(image_location)s 到 %(image_path)s 失败" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "解密私钥失败:%s" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "解密初始化vector失败:%s" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "解密镜像文件 %(image_file)s 失败:%(err)s" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "镜像中不安全的文件名" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "错误的to_global_ipv6 mac:%s" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "错误的to_global_ipv6前缀:%s" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "错误的to_global_ipv6 oject_id;%s" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "驱动仅支持入口类型 'a'。" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "试图移除不存在的链 %s。" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "未知链:%r" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "试图移除不存在的规则:%(chain)r %(rule)r %(wrap)r %(top)r" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "IPTablesManager.apply成功完成" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "挂起进程 dnsmasq 时抛出 %s" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d 过期了,重新启动dnsmasq" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "杀掉进程 radvd 时抛出 %s" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "Pid %d 过期了,重新启动radvd" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "正在开启VLAN接口 %s" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "正在为 %s 开启桥接口" + +#: cinder/network/linux_net.py:1142 +#, fuzzy, python-format +msgid "Starting bridge %s " +msgstr "保证桥 %s" + +#: cinder/network/linux_net.py:1149 +#, fuzzy, python-format +msgid "Done starting bridge %s" +msgstr "保证桥 %s" + +#: cinder/network/linux_net.py:1167 +#, fuzzy, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "移除实例”%s“的虚拟网络设备时失败" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, fuzzy, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "证书 %(certificate_id)s 没有找到。" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "没有找到接口 %(interface)s" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "为实例 |%s| 分配浮动IP" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "为实例 |%s| 释放浮动IP" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "地址 |%(address)s| 没有分配" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "地址 |%(address)s| 没有分配给你的项目 |%(project)s|" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "%s 的配额超出,尝试分配地址" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "数据库不一致:DNS域|%s| 在Cinder数据库中注册,但是对浮动或者实例DNS驱动均不可见。将被忽略。" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "域 |%(domain)s| 已经存在,把区域改变为 |%(av_zone)s|。" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "域 |%(domain)s| 已经存在,把项目改变为 |%(project)s|。" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "解除 %s 过期固定ip的关联" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "设置网络主机" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "为实例 |%s| 的网路分配" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "为实例 |%(instance_id)s| 获取的网络:|%(networks)s|" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "为实例 |%s| 解除网络分配" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" +"instance-dns-zone是 |%(domain)s|,该域位于区域 |%(zone)s| 中。实例|%(instance)s| 在区域 " +"|%(zone2)s| 里。没有DNS记录将创建。" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "租用的IP |%(address)s|" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "没有关联的IP %s 被租用了" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "没有分配的IP |%s| 被租用了" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "释放的IP |%(address)s|" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "没有关联的IP %s 被释放了" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "没有租用的IP %s 被释放了" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "cidr 已经在使用" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "请求的cidr (%(cidr)s) 与已存在的超网络 (%(super)s) 冲突" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "请求的cidr (%(cidr)s) 与已存在的较小的cidr (%(smaller)s) 冲突" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "网络已经存在。" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "网路在删除前必须与项目 %s 解除关联" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "网络数量与VLAN起始数之和不能大于4049" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. 
Network size" +" is %(network_size)s" +msgstr "网络范围不够多而不适合 %(num_networks)s。网络大小是 %(network_size)s。" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "驱动仅支持类型 'a'" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "租户ID没有设" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "Quantum客户请求:%(method)s %(action)s" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "Quantum实体没有找到:%s" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "服务器 %(status_code)s 错误:%(data)s" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "无法连接到服务器。出现错误:%s" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "无法反序列化type = '%s' 的对象" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." +msgstr "QuantumManager 没有使用 'multi_host' 参数。" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "QuantumManager 要求每次调用仅创建一个网落。" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "QuantumManager 没有使用 'vlan_start' 参数。" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "QuantumManager 没有使用 'vpn_start' 参数。" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "QuantumManager 没有使用 'bridge' 参数。" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "QuantumManager 没有使用 'bridge_interface' 参数。" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." 
+msgstr "QuantumManager 要求一个有效的 (.1) 网关地址。" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "无法为租户 '%(q_tenant_id)s' 找到已存在的 net-id是 '%(quantum_net_id)s' 的quantum网络" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "实例 %s 的网络分配" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "实例的端口取消分配失败:|%(instance_id)s|, port_id: |%(port_id)s|" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "实例的ipam取消分配失败:|%(instance_id)s|, vif_uuid: |%(vif_uuid)s|" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "服务器返回错误:%s" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "melange服务的连接错误,重试" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" +"在网络 |%(network_id)s| 中分配IP地址,该网络属于 |%(network_tenant_id)s|, 分配IP给该vif " +"|%(vif_id)s|, 其中mac是 |%(mac_address)s| 所属项目 |%(project_id)s| " + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "调用get_project_and_global_net_ids时 project_id必须是非空。" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "创建网络入口的错误" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "没有net_id = %s的网络" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, fuzzy, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "没有为vif %sid取消分配固定IP" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "将连接接口 %(interface_id)s 连接到net %(net_id)s 针对租户 %(tenant_id)s" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "端口 %(port_id)s 在net %(net_id)s 上删除,针对租户是 %(tenant_id)s" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "%s 不在有效的优先级" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "Problem '%(e)s' 试图发送到通知系统。Payload=%(payload)s" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "Problem '%(e)s' 试图发送到通知驱动 %(driver)s。" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "返回 %s 异常给调用者" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "未打包的上下文:%s" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "已接收 %s" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "没有适用于消息的方法:%s" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "没有适用于消息的方法:%s" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "在 %s 做异步call" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "消息ID(MSG_ID)是 %s" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "在 %s 做异步cast" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "做异步fanout cast" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "正在 %s 上发送通知" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" +"远程错误:%(exc_type)s %(value)s\n" +"%(traceback)s。" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "等待RPC响应返回超时" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "正在重新连接位于 %(hostname)s:%(port)d 的AMQP服务器" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "连接到位于 %(hostname)s:%(port)d 的AMQP服务器" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" +"无法连接到位于%(hostname)s:%(port)d的AMQP server,尝试已经 %(max_retries)d " +"次:%(err_str)s" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "位于%(hostname)s:%(port)d的AMQP服务器不可达:%(err_str)s。%(sleep_time)d 秒钟后请再尝试。" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "为topic '%(topic)s'声明消费者失败:%(err_str)s" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "等待RPC响应超时:%s" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "从队列中消费消息失败:%s" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "给topic '%(topic)s'发布消息失败:%(err_str)s" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "无法连接到AMQP服务器:%s " + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "连接到 %s 的AMQP服务器" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "重建AMQP队列" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." 
+msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" +msgstr "正确的服务在运行吗?" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "无法找到另一个计算节点" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "把“%(method)s”投放在卷%(host)s\"" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "把“%(method)s”投放在主机 \"%(host)s\"" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "把“%(method)s”投放在网络 \"%(host)s\"" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "把“%(method)s”投放在 %(topic)s \"%(host)s\"" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "必须实现一个回滚 schedule" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "块存储迁移无法在共享存储使用" + +#: cinder/scheduler/driver.py:330 +#, fuzzy +msgid "Live migration can not be used without shared storage." +msgstr "块存储迁移无法在共享存储使用" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "主机 %(dest)s 和原先的主机 %(src)s 不兼容。" + +#: cinder/scheduler/driver.py:416 +#, fuzzy, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "无法迁移 %(instance_id)s 到 %(dest)s 上:缺少内存(主机:%(avail)s <= 实例:%(mem_inst)s)" + +#: cinder/scheduler/driver.py:472 +#, fuzzy, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" +"无法迁移%(instance_id)s 到 %(dest)s 上:缺少磁盘(主机:%(available)s <= " +"实例:%(necessary)s)" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "没有主机选择所定义的 %s 主题消息" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "尝试创建 %(num_instances)d 个实例" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "调度器只能理解计算节点(暂时)" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "过滤掉的主机 %(hosts)s" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "加权的主机 %(weighted_host)s" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, fuzzy, python-format +msgid "Host filter passes for %(host)s" +msgstr "主机 %(host)s 没有计算节点" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." 
+msgstr "接收到 %(service_name)s 服务更新,来自 %(host)s。" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "host_manager 只实现了“compute”" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "计算节点 %s 没有服务" + +#: cinder/scheduler/manager.py:85 +#, fuzzy, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "驱动方法 %(driver_method)s 丢失:%(e)s。撤销回schedule()" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "schedule_%(method)s 失败:%(ex)s" + +#: cinder/scheduler/manager.py:159 +#, fuzzy, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." +msgstr "设置实例 %(instance_uuid)s 至 ERROR 状态" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "无法统计调度器的选项文件 %(filename)s:“%(e)s”" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "无法解码调度器的选项:“%(e)s”" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "没有足够可分配的剩余CPU核心" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "卷没有足够可分配的空间" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "未设置 VCPUs;假设 CPU 集合损坏了" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "伪执行命令(子进程):%s" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "伪命令匹配 %s" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "伪命令引起异常 %s" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "伪命令的标准输出stdout='%(stdout)s' 标准错误输出 stderr='%(stderr)s'" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "请扩展模拟的 libvirt 模块来支持标记" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "请扩展伪libvirt模块来支持这种认知方法" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "正在运行的实例:%s" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "终止实例之后:%s" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "内部错误" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "强制杀死实例后:%s" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" +"下面的迁移缺少了降级:\n" +"\t%s" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "id" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "IPv4" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "IPv6" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "起始地址" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "DNS1" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "DNS2" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "VlanID" + +#: cinder/tests/test_cinder_manage.py:176 +msgid 
"project" +msgstr "项目" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "uuid" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "目标 %s 已经分配" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." +msgstr "无法确认导出的卷 id:%s。" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "给定数据:%s" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "结果数据:%s" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "在%s 创建文件模拟客户代理" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "移除在 %s 的模拟的客户代理文件" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, fuzzy, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "超出配额" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "_create: %s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "_delete: %s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "_get: %s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "_get_all: %s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "test_snapshot_create: param=%s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "test_snapshot_create: resp_dict=%s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "test_snapshot_create_force: param=%s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "test_snapshot_create_force: resp_dict=%s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "test_snapshot_show: resp=%s" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "test_snapshot_detail: resp_dict=%s" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "类型:%s" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" +"%(message)s\n" +"状态码: %(_status)s\n" +"主体: %(_body)s" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "认证错误" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "授权错误" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "条目没有找到" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "正在 %(relative_url)s 执行 %(method)s" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "主体:%s" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "%(auth_uri)s => code %(http_status)s" + +#: 
cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "%(relative_uri)s => code %(http_status)s" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "意外的状态码" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "解码JSON:%s" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "嵌套(调用)接收到 %(queue)s, %(value)s" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "嵌套(调用)返回 %s" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "RPC后台不支持超时" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "已接收 %s" + +#: cinder/virt/connection.py:85 +msgid "Failed to open connection to the hypervisor" +msgstr "打开虚拟机管理程序的连接失败" + +#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396 +#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045 +#, python-format +msgid "Compute_service record created for %s " +msgstr "Compute_service记录为 %s 创建 " + +#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399 +#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048 +#, python-format +msgid "Compute_service record updated for %s " +msgstr "Compute_service记录为 %s 更新 " + +#: cinder/virt/firewall.py:130 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "试图不过滤没有过滤的实例 %s" + +#: cinder/virt/firewall.py:137 +#, python-format +msgid "Filters added to instance %s" +msgstr "过滤器添加给实例 %s" + +#: cinder/virt/firewall.py:139 +msgid "Provider Firewall Rules refreshed" +msgstr "提供者防火墙规则刷新" + +#: cinder/virt/firewall.py:291 +#, python-format +msgid "Adding security group rule: %r" +msgstr "添加安全组规则:%r" + +#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87 +#, python-format +msgid "Adding provider rule: %s" +msgstr "添加提供者规则:%s" + +#: cinder/virt/images.py:86 +msgid "'qemu-img info' parsing failed." 
+msgstr "'qemu-img info'解析失败" + +#: cinder/virt/images.py:92 +#, python-format +msgid "fmt=%(fmt)s backed by: %(backing_file)s" +msgstr "fmt=%(fmt)s 由 %(backing_file)s 支持" + +#: cinder/virt/images.py:104 +#, python-format +msgid "Converted to raw, but format is now %s" +msgstr "转化为裸格式,但目前格式是 %s" + +#: cinder/virt/vmwareapi_conn.py:105 +msgid "" +"Must specify vmwareapi_host_ip,vmwareapi_host_username and " +"vmwareapi_host_password to useconnection_type=vmwareapi" +msgstr "针对useconnection_type=vmwareapi必须指定vmwareapi_host_ip,vmwareapi_host_username和vmwareapi_host_password" + +#: cinder/virt/vmwareapi_conn.py:276 +#, python-format +msgid "In vmwareapi:_create_session, got this exception: %s" +msgstr "在vmwareapi:_create_session,得到这个异常:%s" + +#: cinder/virt/vmwareapi_conn.py:359 +#, python-format +msgid "In vmwareapi:_call_method, got this exception: %s" +msgstr "在vmwareapi:_call_method,得到这个异常:%s" + +#: cinder/virt/vmwareapi_conn.py:398 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: success" +msgstr "任务 [%(task_name)s] %(task_ref)s 状态:成功" + +#: cinder/virt/vmwareapi_conn.py:404 +#, python-format +msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s" +msgstr "任务 [%(task_name)s] %(task_ref)s 状态:错误 %(error_info)s" + +#: cinder/virt/vmwareapi_conn.py:409 +#, python-format +msgid "In vmwareapi:_poll_task, Got this error %s" +msgstr "在vmwareapi:_poll_task,得到这个错误 %s" + +#: cinder/virt/xenapi_conn.py:140 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" +"针对use connection_type=xenapi必须指定xenapi_connection_url, " +"xenapi_connection_username (可选) 和 xenapi_connection_password" + +#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472 +msgid "Could not determine iscsi initiator name" +msgstr "无法确定iscsi initiator名字" + +#: cinder/virt/xenapi_conn.py:460 +msgid "Host startup on XenServer is not supported." +msgstr "不支持在XenServer启动主机" + +#: cinder/virt/xenapi_conn.py:489 +msgid "Unable to log in to XenAPI (is the Dom0 disk full?)" +msgstr "无法登录到XenAPI(Dom0磁盘是空么?)" + +#: cinder/virt/xenapi_conn.py:527 +msgid "Host is member of a pool, but DB says otherwise" +msgstr "" + +#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612 +#, python-format +msgid "Got exception: %s" +msgstr "得到异常:%s" + +#: cinder/virt/baremetal/dom.py:93 +msgid "No domains exist." 
+msgstr "没有域存在。" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "============= 初始域 =========== : %s" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "创建域:将被移除" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "没有运行的域:移除" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "运行在位置结点的域:放弃" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "没有这个域 (%s)" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "裸机结点 %s 电源停止失败" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "deactivate -> activate失败" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "destroy_domain:没有该域" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "没有该域 %s" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "域:%s" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "结点:%s" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "储存域之后:%s" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "域去活/移除失败" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "===== 域正在创建 =====" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "同样的域名已经存在" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "create_domain:在get_idle_node之前" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "新域创建:%s" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "裸机结点 %s 启动失败" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "没有该域" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "change_domain_state:新状态 %s" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "把伪域名存到文件中:%s" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "域不存在" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "未知的裸机驱动 %(d)s" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "销毁实例 '%(name)s' 时遇到错误:%(ex)s" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "实例 %(instance_name)s:正在删除实例文件 %(target)s" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "实例 %s:重启" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "_wait_for_reboot失败" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "实例 %s:已救援" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "_wait_for_rescue 失败" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "<============= 生产裸机=============>" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid 
"instance %s: is building" +msgstr "实例 %s:正在创建中" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "已经注入密钥但是实例还没有运行" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "实例 %s:已启动" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "~~~~~~ 当前状态 = %s ~~~~~~" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" +msgstr "实例 %s 生产成功" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "实例 %s:没有启动" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "过多提交裸机分配" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "实例 %s:正在创建镜像" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "实例 %(inst_name)s:将 %(injection)s 注入镜像 %(img_id)s" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "实例 %(inst_name)s:忽略向镜像 %(img_id)s 注入数据的错误 (%(e)s)" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "实例 %s:开始方法 toXML" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "实例 %s:方法toXML 完成" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "无法得到CPU的数目,因为这个函数不是给这个平台执行的。这个错误可以被放心忽略。" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "#### RLK: cpu_arch = %s " + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "正在更新。" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "正在更新主机状态" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "free_node..." + +#: cinder/virt/baremetal/tilera.py:216 +#, fuzzy, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "针对node_id = %(id)s node_ip = %(ip)s调用了deactivate_node" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "结点状态设为0" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "rootfs 已经被移除了" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "在ping裸机节点之前" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "activate_node" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." 
+msgstr "节点处于未知的错误状态。" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "没有合适的镜像句柄配置好" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "未知的磁盘镜像句柄:%s" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "镜像已经挂载" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "挂载文件系统失败:%s" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "移除容器失败:%s" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." +msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "不支持的分区:%s" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "未知的guestmount错误" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "无法给loopback附加镜像:%s" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "未找到分区" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "映射分区失败:%s" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "NBD不可用:模块没有加载" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "没有空闲NBD设备" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "qemu-nbd 错误:%s" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "nbd 设备 %s 没有出现" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "正在连接 libvirt:%s" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "连接 libvirt 失败" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "销毁时发生错误。Code=%(errcode)s Error=%(e)s" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "删除已保存的实例时 libvirt 发生错误。Code=%(errcode)s Error=%(e)s" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "在 undefine 时 libvirt 发生错误。Code=%(errcode)s Error=%(e)s" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "实例销毁成功。" + +#: cinder/virt/libvirt/connection.py:435 +#, fuzzy, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "在 undefine 时 libvirt 发生错误。Code=%(errcode)s Error=%(e)s" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "删除实例文件 %(target)s" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "附加 LXC 块设备" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "分离LXC 块设备" + +#: cinder/virt/libvirt/connection.py:692 +#, fuzzy +msgid "Instance soft rebooted successfully." +msgstr "实例成功重启。" + +#: cinder/virt/libvirt/connection.py:696 +#, fuzzy +msgid "Failed to soft reboot instance." 
+msgstr "重新启动实例失败" + +#: cinder/virt/libvirt/connection.py:725 +#, fuzzy +msgid "Instance shutdown successfully." +msgstr "实例成功生产。" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "重启过程中,实例消失。" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "实例成功重启。" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "发现 %(migration_count)d 个超过 %(confirm_window)d 秒未经确认的迁移" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "正在自动确认迁移 %d" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "实例在运行" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." +msgstr "实例成功生产。" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "data:%(data)r, fpath: %(fpath)r" + +#: cinder/virt/libvirt/connection.py:978 +#, fuzzy +msgid "Guest does not have a console available" +msgstr "用户没有管理员权限" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "正在创建镜像" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "将 %(injection)s 注入到镜像 %(img_id)s" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "忽略向镜像 %(img_id)s 注入的数据的错误 (%(e)s)" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "块设备列表 block_device_list %s" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "正在启动 toXML 方法" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "toXML方法完成" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "查找 %(instance_name)s时libvirt出错:[错误代码 %(error_code)s] %(ex)s" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "libvirt版本过旧(不支持getVersion)" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "'' 必须为 1, 但是为 %d\n" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "拓扑 (%(topology)s) 必须含有 %(ks)s" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" +"已启动实例的CPU信息:\n" +"%s" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" +"CPU 不兼容.\n" +"\n" +"%(ret)s\n" +"\n" +"参考 %(u)s" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. 
nwfilter not found." +msgstr "迁移 %s 超时" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "因它像卷,所以跳过 %(path)s" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "实例 %s:开始执行 migrate_disk_and_power_off" + +#: cinder/virt/libvirt/connection.py:2513 +#, fuzzy, python-format +msgid "During wait running, instance disappeared." +msgstr "%s 在运行中消失了。" + +#: cinder/virt/libvirt/connection.py:2518 +#, fuzzy, python-format +msgid "Instance running successfully." +msgstr "实例 %s 成功运行。" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "实例 %s:开始执行 finish_migration" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "实例 %s:开始执行 finish_revert_migration" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." +msgstr "Libvirt模块无法加载。NWFilterFirewall 无法正常工作。" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "在 nwfilter 里调用 setup_basic_filtering" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "正在确保静态过滤器" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgstr "nwfilter(%(instance_filter_name)s)未找到" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." 
+msgstr "名称为 %(name)s 的nwfilter(%(instance_filter_name)s) 未找到。" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "iptables 防火墙:设置基本的过滤规则" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "试图不过滤没有过滤的实例" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "%s 是一个正确的实例名称" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "%s 有一个磁盘文件" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "实例 %(instance)s 由文件 %(backing)s 来备份" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "实例 %(instance)s 正在使用的备份文件 %(backing)s 没有出现在镜像服务里。" + +#: cinder/virt/libvirt/imagecache.py:237 +#, fuzzy, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "%(container_format)s-%(id)s (%(base_file)s):镜像验证失败" + +#: cinder/virt/libvirt/imagecache.py:247 +#, fuzzy, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "%(container_format)s-%(id)s (%(base_file)s):镜像验证跳过,没有散列存储" + +#: cinder/virt/libvirt/imagecache.py:266 +#, fuzzy, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "删除 %(base_file)s 失败,错误是 %(error)s" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "基文件太新不需要删除:%s" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "正在删除基文件:%s" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "删除 %(base_file)s 失败,错误是 %(error)s" + +#: cinder/virt/libvirt/imagecache.py:299 +#, fuzzy, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "%(container_format)s-%(id)s (%(base_file)s):正在检查中" + +#: cinder/virt/libvirt/imagecache.py:318 +#, fuzzy, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" +"%(container_format)s-%(id)s (%(base_file)s):正在使用中:本地节点 %(local)d, 远程节点 " +"%(remote)d" + +#: cinder/virt/libvirt/imagecache.py:330 +#, fuzzy, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! " +"instances: %(instance_list)s" +msgstr "" +"%(container_format)s-%(id)s (%(base_file)s):警告 -- 使用中缺少基文件! 
实例: " +"%(instance_list)s" + +#: cinder/virt/libvirt/imagecache.py:338 +#, fuzzy, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "%(container_format)s-%(id)s (%(base_file)s):在使用中: 在远程节点 (%(remote)d 上" + +#: cinder/virt/libvirt/imagecache.py:348 +#, fuzzy, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "%(container_format)s-%(id)s (%(base_file)s):镜像不在使用中" + +#: cinder/virt/libvirt/imagecache.py:354 +#, fuzzy, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "%(container_format)s-%(id)s (%(base_file)s):镜像在使用中" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "跳过验证,在 %s 上没有基础目录" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "验证基础镜像" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "未知的基文件:%s" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "活跃的基文件:%s" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "损坏的基文件:%s" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "可删除的基文件:%s" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "确认完成" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "无法找到一个开放端口" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "保证vlan %(vlan)s 和桥 %(bridge)s" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "保证桥 %s" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "移除实例”%s“的虚拟网络设备时失败" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "在 %s 未找到iSCSI设备" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "在 %(mount_device)s 上还没有找到iSCSI卷。将再次扫描并重试。尝试次数:%(tries)s" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "找到iSCSI节点 %(mount_device)s (经过%(tries)s 次再扫描)" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "调用 RetrieveProperties 时发生错误 %s" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "属性 %(attr)s 没有为管理对象 %(objName)s 设置" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "没有虚拟机注册" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "ref 为 %s 的虚拟机不存在" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "退出无效的会话或者已经退出了:%s" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "会话有错误" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "会话无效" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr " 还没有虚拟机被注册" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "Glance 镜像 %s 在被杀死的状态" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "Glance 镜像 %(image_id)s 处于未知状态 - %(state)s" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "ESX SOAP 服务器在响应里为托管系统返回一个空的端口组" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "正在ESX主机上创建名称为 %s 的端口组" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "已经在ESX主机上创建了名称为 %s 的端口组" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "HTTP连接关闭时发生异常 VMWareHTTpWrite. Exception:%s" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "无法导入 suds。" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "必须指定 vmwareapi_wsdl_loc" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "VI SDK没有提供这样的SOAP方法 “%s”" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "在 %s 中发生 httplib 错误: " + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "在 %s 套接字中发生错误 " + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "在%s里发生类型错误: " + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "在 %s 发生异常 " + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "获取实例列表" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "总共获得 %s 个实例" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "无法得到本地的存储引用" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "在ESX主机上创建名为 %s 的虚拟机" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "已经在ESX主机上创建名为 %s 的虚拟机" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" +"创建 %(vmdk_file_size_in_kb)s KB 大的虚拟磁盘和适配器类型 %(adapter_type)s 在ESX主机的本地存储 " +"%(data_store_name)s 上" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "创建 %(vmdk_file_size_in_kb)s KB 大的虚拟磁盘在ESX主机的本地存储 %(data_store_name)s 上" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "删除文件 %(flat_uploaded_vmdk_path)s 在ESX主机的本地存储 %(data_store_name)s 上" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "删除文件 %(flat_uploaded_vmdk_path)s 在ESX主机的本地存储 %(data_store_name)s 上" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "下载文件数据 %(image_ref)s 到ESX主机的数据存储 %(data_store_name)s 上" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "已经下载镜像文件数据 %(image_ref)s 在ESX数据存储 %(data_store_name)s 上" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "正在重新配置虚拟机实例 %s 来附加镜像磁盘" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "已经重新配置虚拟机实例 %s 来附加于镜像磁盘" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "正启动虚拟机实例 %s" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "已经启动虚拟机实例 %s" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "正在创建虚拟机实例快照 %s " + 
+#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "已经创建虚拟机实例 %s 的快照 " + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "正在为虚拟机实例 %s 做快照之前复制磁盘数据" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "已经在为虚拟机实例 %s 做快照之前复制了磁盘数据" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "正在上传镜像 %s" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "已经上传镜像 %s" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" +msgstr "正在删除临时的vmdk文件 %s" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "已经删除临时的vmdk文件 %s" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "实例未启动" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "正在重启虚拟机 %s 的客户操作系统" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "已经重启虚拟机 %s 的客户操作系统" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "正在硬重启虚拟机 %s" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "已经硬重启虚拟机 %s" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "实例 - %s 不存在" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "正在关闭虚拟机 %s" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "已经关闭虚拟机 %s" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "正在注销虚拟机 %s" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "已经注销虚拟机 %s" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "注销虚拟机时在 vmwareapi:vmops:destroy 中发生异常:%s" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "正在从数据存储 %(datastore_name)s 中删除虚拟机 %(name)s 的内容" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "已经从数据存储 %(datastore_name)s 中删除虚拟机 %(name)s 的内容" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "当从磁盘删除虚拟机的内容时在 vmwareapi:vmops:destroy 里发生异常:%s" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "vmwareapi 不支持暂停" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "vmwareapi 不支持取消暂停" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "正在挂起虚拟机 %s " + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "已经挂起虚拟机 %s " + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "实例已关闭,无法挂起。" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "虚拟机 %s 已经处于挂起状态。不做任何操作直接返回" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "正在恢复虚拟机 %s" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "已经恢复虚拟机 %s " + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "实例不在挂起状态" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "get_diagnostics 没有在 vmwareapi 里实现" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "正在重新配置虚拟机实例 %(name)s,将machine id设置为ip - %(ip_addr)s" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "已经重新配置虚拟机实例 %(name)s,将machine id设置为ip - %(ip_addr)s" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "正在创建路径为 %s 的目录" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "已经创建路径为 %s 的目录" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "正在从glance镜像服务器中下载镜像 %s" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "已经从glance镜像服务器中下载镜像 %s" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "正在向Glance镜像服务器上传镜像 %s" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "已经向Glance镜像服务器上传了镜像 %s" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "正在获取镜像 %s 的大小" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "获得镜像 %(image)s 的大小为 %(size)s" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "产生 NotImplemented 错误" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake 没有 %s 的实现" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "正在调用 %(localname)s %(impl)s" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "调用 getter %s" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "xenapi.fake 没有 %s 的实现或者调用时用了错误数目的参数" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "无法为该主机获得存储库:%s" + +#: cinder/virt/xenapi/host.py:169 +#, python-format +msgid "Unable to get updated status" +msgstr "无法获得更新后的状态" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s."
+msgstr "对 %(method)s 的调用返回错误:%(e)s。" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "发现不唯一的网络 name_label %s" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "发现桥 %s 的网络不唯一" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "发现网桥 %s 没有网络" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, fuzzy, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "无法找到实例 %s 的宿主机" + +#: cinder/virt/xenapi/pool.py:162 +#, fuzzy, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "注入文件失败:%(resp)r" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "无法使用全局角色 %(role_id)s" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "没有找到设备 %s 的PIF" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" +"网络 %(bridge)s 的 PIF %(pif_rec['uuid'])s 有VLAN id %(pif_vlan)d。期待的数目是 " +"%(vlan_num)d" + +#: cinder/virt/xenapi/vm_utils.py:218 +#, fuzzy, python-format +msgid "Created VM" +msgstr "_create: %s" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "没有在实例 %s 找到VBD" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, fuzzy, python-format +msgid "VBD %s already detached" +msgstr "已经分离" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "无法移除 VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "无法销毁 VBD %s" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, fuzzy, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "创建了VBD %(vbd_ref)s 目的是为了虚拟机 %(vm_ref)s,VDI %(vdi_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "创建了VBD %(vbd_ref)s 目的是为了虚拟机 %(vm_ref)s,VDI %(vdi_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "无法销毁 VDI %s" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" +"创建了 VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) 位置在" +" %(sr_ref)s。" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "复制了 VDI %(vdi_ref)s ,对象来自VDI %(vdi_to_copy_ref)s ,位置在 %(sr_ref)s。" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "克隆了 VDI %(vdi_ref)s 对象来自VDI %(vdi_to_clone_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, fuzzy, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "未找到 %(vm_ref)s 的主VDI" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, fuzzy, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "正在为虚拟机 %(vm_ref)s 做快照,采用标签是 “%(label)s”" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, fuzzy, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "已经创建了快照 %(template_vm_ref)s 快照对象是虚拟机 %(vm_ref)s。" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "请求xapi 上传 %(vdi_uuids)s 作为镜像ID %(image_id)s" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "只能在ext类型的缺省本地存储库支持快速克隆。这个系统的存储库类型为 %(sr_type)s。忽略此 cow 标记。" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "请求 xapi 获取 vhd 镜像 %(image)s" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "xapi 'download_vhd' 返回“%(vdi_type)s”类型的VDI,其UUID为 “%(vdi_uuid)s”" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "image_size_bytes=%(size_bytes)d,allowed_size_bytes=%(allowed_size_bytes)d" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "镜像大小 %(size_bytes)d 超过instance_type所允许的小大 %(allowed_size_bytes)d" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, fuzzy, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "获取镜像 %(image)s" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, fuzzy, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "镜像 %(image)s 的大小:%(virtual_size)d" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "内核/内存盘镜像太大:%(vdi_size)d 字节,最大 %(max_size)d 字节" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "将VDI %s 复制到dom0的/boot/guest下" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "内核/内存盘 VDI %s 已销毁" + +#: cinder/virt/xenapi/vm_utils.py:895 +#, fuzzy, python-format +msgid "Failed to fetch glance image" +msgstr 
"实例 %s:获取Glance镜像失败" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "检测到 %(image_type_str)s 格式,目标是镜像 %(image_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "为PV内核查询vdi %s" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "未知的镜像格式 %(disk_image_type)s" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "VDI %s 依然可用" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver 虚拟机状态 -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of %(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "重新扫描存储库 %s" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "标记sr_matching_filter '%s' 没有遵循格式要求" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "XenAPI无法找到安装客户实例的存储库。请检查你的配置或者配置标记'sr_matching_filter'" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "无法找到content-type ISO的存储库" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "ISO:正在查看存储库 %(sr_rec)s" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "ISO:非iso内容" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "ISO:iso content_type,没有 'i18n-key' 键" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "ISO:iso content_type,i18n-key的值不是 'local-storage-iso'" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "ISO: 存储库符合标准" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "ISO: ISO, 正在查看是否是本地的主机" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "ISO: PBD %(pbd_ref)s 消失了" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "ISO: PBD匹配, 想要 %(pbd_rec)s, 目前有 %(host)s" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "ISO:含有本地PBD的存储库" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "无法为含服务器详细信息的虚拟机 %(vm_uuid)s 获取RRD XML:%(server)s。" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." 
+msgstr "无法获取包含服务器详细情况的RRD XML更新:%(server)s。" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "来自Xenserver无效的统计数据:%s" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "VHD %(vdi_uuid)s 有父 %(parent_ref)s" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "父标识 %(parent_uuid)s 和原先的父标识 %(original_parent_uuid)s 不匹配,正在等待合并..." + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "VHD coalesce 将要超过(%(max_attempts)d),放弃中..." + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "等待设备 %s 创建超时" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "插入VBD %s... " + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "插入VBD %s 完成。" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "VBD %(vbd_ref)s 作为 %(orig_dev)s 插入" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "VBD %(vbd_ref)s 插入错误的设备,重新映射为 %(dev)s" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "正在销毁VDI为 %s 的 VBD " + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "已经销毁VDI为 %s 的 VBD" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "对 %s 运行pygrub" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "找到Xen内核 %s" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." +msgstr "没有找到Xen内核。正在启动HVM。" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "分区:" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr " %(num)s: %(ptype)s %(size)d sectors" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "将分区表 %(primary_first)d %(primary_last)d 写入到 %(dev_path)s..." + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "完成写入分区表 %s 。" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." 
+" Networking files will not bemanipulated" +msgstr "安装在该镜像的XenServer工具可以进行网络注入。网络文件不会被操作。" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "该镜像有XenServer工具,但是不能进行网络注入" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "没有在该镜像上安装XenServer工具" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "直接操作接口文件" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "挂载文件系统失败(期望的是非Linux实例):%s" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, fuzzy, python-format +msgid "Updating progress to %(progress)d" +msgstr "将实例 '%(instance_uuid)s' 的进度更新到 %(progress)d" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "尝试过启动不存在的实例,实例的id %s 不正确" + +#: cinder/virt/xenapi/vmops.py:233 +#, fuzzy +msgid "Starting instance" +msgstr "正在启动虚拟机" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "从 dom0 中移除内核/内存盘文件" + +#: cinder/virt/xenapi/vmops.py:358 +#, fuzzy +msgid "Failed to spawn, rolling back" +msgstr "在数据库更新卷失败" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +#, fuzzy, python-format +msgid "Auto configuring disk, attempting to resize partition..." +msgstr "正在自动配置实例 %(instance_uuid)s 的磁盘,尝试调整分区大小..." + +#: cinder/virt/xenapi/vmops.py:515 +#, fuzzy, python-format +msgid "Invalid value for injected_files: %r" +msgstr "injected_files无效的值:'%s'" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "注入文件路径:'%s'" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "设置管理员密码" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "重置网络" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "设置VCPU 权重" + +#: cinder/virt/xenapi/vmops.py:544 +#, fuzzy +msgid "Starting VM" +msgstr "重启xvp" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "%(hypervisor)s/%(os)s/%(architecture)s 最新的agent build 是 %(version)s 版本" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "没有找到 %(hypervisor)s/%(os)s/%(architecture)s 的代理创建" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "查询代理版本" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "实例代理版本:%s" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "把代理更新为 %s" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." 
+msgstr "无法为 '%s' 确定opaque_ref。" + +#: cinder/virt/xenapi/vmops.py:670 +#, fuzzy, python-format +msgid "Finished snapshot and upload for VM" +msgstr "快照完毕并为虚拟机 %s 上传" + +#: cinder/virt/xenapi/vmops.py:677 +#, fuzzy, python-format +msgid "Starting snapshot for VM" +msgstr "开始为虚拟机 %s 快照" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "无法为实例 %(instance_uuid)s 快照:%(exc)s" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "将 vhd 转移到新主机失败" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "将 VDI %(cow_uuid)s 由 %(old_gb)dGB 调小到 %(new_gb)dGB" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "将 VDI %(vdi_uuid)s 由 %(old_gb)dGB 调大到 %(new_gb)dGB" + +#: cinder/virt/xenapi/vmops.py:901 +#, fuzzy, python-format +msgid "Resize complete" +msgstr "调整实例 %s 的大小完毕" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "查询代理版本失败:%(resp)r" + +#: cinder/virt/xenapi/vmops.py:949 +#, fuzzy, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "domid 由 %(olddomid)s 改变为 %(newdomid)s" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "更新代理失败:%(resp)r" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "交换钥匙失败:%(resp)r" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "更新密码失败:%(resp)r" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "注入文件失败:%(resp)r" + +#: cinder/virt/xenapi/vmops.py:1032 +#, fuzzy, python-format +msgid "VM already halted, skipping shutdown..." +msgstr "虚拟机 %(instance_uuid)s 已经终止,跳过关闭..." + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "无法为虚拟机找到VBD" + +#: cinder/virt/xenapi/vmops.py:1097 +#, fuzzy, python-format +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "实例 %(instance_uuid)s 使用RAW或者VHD,跳过内核和内存盘的删除" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "实例拥有内核或者内存盘,但不是二者均有" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "内核/内存盘文件移除了" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +#, fuzzy +msgid "Destroying VM" +msgstr "重启xvp" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "虚拟机不存在,跳过销毁..." 
+ +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "实例已处于救援模式:%s" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "发现 %(instance_count)d 个悬挂超过 %(timeout)d 秒的重启" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "正在自动硬重启" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "正在自动确认实例 %(instance_uuid)s 的迁移 %(migration_id)s" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "没有找到实例 %(instance_uuid)s" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "处于ERROR状态" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "无法获得带宽信息。" + +#: cinder/virt/xenapi/vmops.py:1469 +#, python-format +msgid "Injecting network info to xenstore" +msgstr "正在向xenstore注入网络信息" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "正在创建vif" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "正在为网络 %(network_ref)s 创建VIF" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "已为网络 %(network_ref)s 创建VIF %(vif_ref)s" + +#: cinder/virt/xenapi/vmops.py:1520 +#, python-format +msgid "Injecting hostname to xenstore" +msgstr "正在向xenstore注入主机名" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" +"对 %(method)s 的代理调用返回无效的响应:%(ret)r。path=%(path)s; " +"args=%(args)r" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "超时:对 %(method)s 的调用超时。args=%(args)r" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "未实现:代理不支持对 %(method)s 的调用。args=%(args)r" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "对 %(method)s 的调用返回错误:%(e)s。" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "OpenSSL错误:%s" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "在volume_utils创建存储库" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "类型is = %s" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "name = %s" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "将 %(label)s 作为 %(sr_ref)s 创建。" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "无法创建存储库" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "在volume_utils里引入sr" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "将 %(label)s 作为 %(sr_ref)s 引入。" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "为存储库创建pbd" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "插入存储库" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "无法引入存储库" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "无法得到使用uuid的存储库" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "遗忘存储库 %s..." + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "无法遗忘Storage Repository" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "引入 %s..." 
+ +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "无法在VBD %s找到存储库" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "异常 %(exc)s 在为 %(sr_ref)s 得到PBDs时被忽略" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "异常 %(exc)s 在拔开PBD %(pbd)s 时被忽略" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "无法在存储库 %s 上引入VDI" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "无法获取VDI %s 的记录" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "无法为存储库 %s 引入VDI" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "在存储库 %s 寻找VDIs出错" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "无法为VDI %s 找到VBD" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "无法获得目标信息 %(data)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "挂载点无法被翻译:%s" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "找不到VDI ref" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "正在创建存储库 %s" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "无法创建存储库" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "无法获取存储库记录" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "引入存储库 %s" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "在xapi数据库找到存储库。无需引入。" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "无法引入存储库" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "检查存储库 %s" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "在xapi数据库没有找到存储库 %s" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "不能遗忘存储库" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "无法在存储库 %(sr_ref)s 上为实例 %(instance_name)s 创建 VDI" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "存储库 %(sr_ref)s 不能为实例%(instance_name)s使用" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "无法附加卷到实例 %s" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "挂载点 %(mountpoint)s 附加到实例 %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "分离_volume: %(instance_name)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "无法找到 %s 卷" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "无法分离 %s 卷" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "无法销毁VBD %s" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "净化存储库 %s 出错" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "挂载点 %(mountpoint)s 从实例 %(instance_name)s 分离" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "握手出错:%s" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "无效的请求:%s" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "请求:%s" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "请求缺少令牌:%s" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "请求中有无效令牌:%s" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "意外错误:%s" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "启动cinder-xvpvncproxy节点(版本 %s)" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "状态必须可用" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "%(pid)s 的配额超出,尝试创建 %(size)sG 的卷" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "卷组状态必须可获取" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" 
+msgstr "已经附加" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "已经分离" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "必须可用" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid "Volume Snapshot status must be available or error" +msgstr "卷组状态必须可获取" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "从失败的执行中恢复。尝试编号 %s" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "卷组 %s 不存在" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "跳过ensure_export。没有为卷提供iscsi_target:%d" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "跳过remove_export。没有为卷提供iscsi_target:%d" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "跳过remove_export。没有为卷导出iscsi_target:%d" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "采用discovery,ISCSI provider_location 没有存储" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "ISCSI Discovery:找到 %s" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "无法确认导出的卷id:%(volume_id)s。" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "FAKE ISCSI: %s" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "RBD没有池 %s" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "Sheepdog 没有工作:%s" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "Sheepdog 没有工作" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "LoggingVolumeDriver: %s" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "重新导出卷%s" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "卷 %s:跳过导出" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "卷 %s: 创建中" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "卷%(vol_name)s:创建大小为%(vol_size)s的逻辑卷" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "卷%s:正在创建导出" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "卷%s:创建成功" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "卷仍在附加中" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "卷不属于这个节点" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "卷%s:正在移除导出" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "卷%s:删除中" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "卷 %s:卷繁忙" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "卷%s:删除成功" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot 
%s: creating" +msgstr "快照 %s:正在创建" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "快照 %(snap_name)s:正在创建" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "快照 %s:创建成功" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "快照 %s:正在删除" + +#: cinder/volume/manager.py:214 +#, fuzzy, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "快照 %s:创建成功" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "快照 %s:删除成功" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "检查卷能力" + +#: cinder/volume/manager.py:314 +#, python-format +msgid "New capabilities found: %s" +msgstr "找到新能力:%s" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "清理能力" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "收到通知 {%s}" + +#: cinder/volume/netapp.py:79 +#, fuzzy, python-format +msgid "API %(name)sfailed: %(reason)s" +msgstr "镜像 %(image_id)s 无法接受,原因是: %(reason)s" + +#: cinder/volume/netapp.py:109 +#, fuzzy, python-format +msgid "%s is not set" +msgstr "租户ID没有设" + +#: cinder/volume/netapp.py:128 +#, fuzzy, python-format +msgid "Connected to DFM server" +msgstr "连接到 %s 的AMQP服务器" + +#: cinder/volume/netapp.py:159 +#, fuzzy, python-format +msgid "Job failed: %s" +msgstr "未知的基文件:%s" + +#: cinder/volume/netapp.py:240 +#, fuzzy +msgid "Failed to provision dataset member" +msgstr "更新数据库失败" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "无法为VDI %s 找到VBD" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "无法为卷 %s 找到 iSCSI 导出" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, fuzzy, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/netapp.py:614 +#, fuzzy, python-format +msgid "Failed to get host details for host ID %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/netapp.py:620 +#, fuzzy, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/netapp.py:625 +#, fuzzy, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "为ip: %s获取元数据失败" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "指定san_password或者san_private_key" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "san_ip必须设置" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "没有为 %(zfs_poolname)s 找到LUID。Output=%(out)s" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "返回CLIQ命令 %s" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "CLIQ命令 %(verb)s %(cliq_args)s 错误格式的响应。Result=%(out)s" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. 
Result=%(out)s" +msgstr "在运行CLIQ命令 %(verb)s %(cliq_args)s 时发生错误。输出结果 Result=%(out)s" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "集群 %(cluster_name)s 有意外数量的虚拟 ip 地址。输出结果 Result=%(_xml)s" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "卷信息:%(volume_name)s => %(volume_attributes)s" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "不支持local_path" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "无法为卷 %s 确定项目,无法导出" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "SolidFire API 调用的参数:%s" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "调用 json.loads() 引起异常:%s" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "SolidFire API调用结果:%s" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "找到solidfire帐户:%s" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "solidfire帐户:%s 不存在,正在创建..." + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..." +msgstr "进入SolidFire create_volume..." + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "离开SolidFire create_volume" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "进入SolidFire delete_volume..." + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "正在删除volumeID:%s " + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "离开SolidFire delete_volume" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "正在执行SolidFire ensure_export..." + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "正在执行SolidFire create_export..." + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "name不能是None" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "id不能是None" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "存储库 name = %s" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "参数:%s" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "创建存储库 %s失败... 继续" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "创建失败" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "新存储库的存储库 UUID:%s" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "更新数据库失败" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "引入存储库 %s 失败... 
继续" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "到达后台 %d 失败" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "XenSMDriver要求xenapi连接" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "初始化会话失败" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "卷会在后台创建 - %d" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "在数据库更新卷失败" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "无法创建卷" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "删除VDI失败" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "在数据库删除卷失败" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "没有在数据库找到卷" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "没有在数据库中找到后台(backend)" + +#: cinder/volume/nexenta/__init__.py:27 +#, fuzzy, python-format +msgid "Nexenta SA returned the error" +msgstr "服务器返回错误:%s" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, fuzzy, python-format +msgid "Sending JSON data: %s" +msgstr "给定数据:%s" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +#, fuzzy +msgid "Bad response from server" +msgstr "来自SolidFire API的错误响应" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, fuzzy, python-format +msgid "Got response: %s" +msgstr "响应 %s" + +#: cinder/volume/nexenta/volume.py:96 +#, fuzzy, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "卷组 %s 不存在" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." +msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "未指定Cinder访问参数。" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "无法找到虚拟存储阵列 %(id)d。" + +#~ msgid "Virtual Storage Array %(name)s could not be found." 
+#~ msgstr "无法找到虚拟存储阵列 %(name)s。" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "父组id和组id不能一样" + +#~ msgid "No body provided" +#~ msgstr "没有提供主体" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "创建VSA %(display_name)s 类型是 %(vc_type)s" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "删除id:%s的VSA" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "把地址 %(ip)s 与VSA %(id)s关联" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "取消地址与VSA %(id)s关联" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "没有找到 %(obj)s 该对象的ID是 %(id)s" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "%(obj)s 的ID是 %(id)s 它属于VSA %(own_vsa_id)s而不是VSA %(vsa_id)s。" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "索引。vsa_id=%(vsa_id)s" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "详细内容。vsa_id=%(vsa_id)s" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "创建卷。vsa_id=%(vsa_id)s, body=%(body)s" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "创建 %(size)s GB的卷来源是VSA ID %(vsa_id)s" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "更新%(obj)s至id:%(id)s ,修改:%(changes)s" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "删除卷。vsa_id=%(vsa_id)s, id=%(id)s" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "显示卷。vsa_id=%(vsa_id)s, id=%(id)s" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "为 VSA %s 索引实例" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "准备强制删除虚拟机 %(instance_uuid)s,即使已经删除了。" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "数据库中不存在实例 %(instance_uuid)s,但是无论如何要用特殊上下文来关闭。" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "尝试销毁已经销毁的实例: %s" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "无法不丢失数据地降级" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "%(fl_host)s:%(fl_port)d上的AMQP服务器不可达:%(e)s。 %(fl_intv)d 秒钟后再尝试。" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "%(tries)d 次尝试后依然无法连接到AMQP服务器。正在关闭。" + +#~ msgid "Reconnected to queue" +#~ msgstr "重新连接队列" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "从队列获取消息失败:%s" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "为%s初始化适配器消费者(Adapter Consumer)" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "\"%(exchange)s\" fanout exchange用路由密钥 \"%(key)s\" 创建的" + +#~ msgid "Exception while processing consumer" +#~ msgstr "处理消费者出现异常" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "正在创建 \"%(exchange)s\" fanout exchange" + +#~ msgid "topic is %s" +#~ msgstr "主题是 %s" + +#~ msgid "message %s" +#~ msgstr "消息 %s" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "无法确认tmpfile %(ipath)s 在相同的共享存储的 %(src)s 和 %(dest)s之间。" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "_filter_hosts: %(request_spec)s" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "按照驱动类型 %s 过滤主机" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "主机 %s 没有足够的容量。跳过" + +#~ msgid "Filter hosts: %s" +#~ msgstr "过滤主机:%s" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "必须实现主机选择机制" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "所选择主机的最大数目(%d)" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "选择超过了主机 %(host)s" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "提供卷 %(name)s 它的大小为 %(size)s 位置在主机 %(host)s" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "volume_params %(volume_params)s" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "%(i)d:卷名%(name)s" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "正在尝试生产 %(num_volumes)d 个卷" + +#~ msgid "Error creating volumes" +#~ msgstr "创建卷失败" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "Non-VSA 卷 %d" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "正在生产卷 %(volume_id)s 它的驱动类型为 %(drive_type)s" + +#~ msgid "Error creating volume" +#~ msgstr "创建卷失败" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "没有能力分配大小为 %(size)s 的卷" + +#~ msgid "Host %s:" +#~ msgstr "主机 %s:" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" +#~ "\t驱动 %(qosgrp)-25s:总共 %(total)2s,已用 %(used)2s," +#~ " 空闲 %(free)2s。可用 %(avail)-5s" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "\t [ tab] LeastUsedHost:最佳主机: %(best_host)s。(使用的空间 %(min_used)s)" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "\t MostAvailCap:最佳主机: %(best_host)s。 (可用 %(max_avail)s %(type_str)s)" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "(%(nm)s) 发布 (键:%(routing_key)s) %(message)s" + +#~ msgid "Publishing to route %s" +#~ msgstr "发布并路由到 %s" + +#~ msgid "Declaring queue %s" +#~ msgstr "正在声明队列 %s" + +#~ msgid "Declaring exchange %s" +#~ msgstr "正在声明交换(exchange) %s" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "把队列 %(queue)s 绑定到 %(exchange)s 采用的键是 %(routing_key)s" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "从队列 %(queue)s 取消息:%(message)s" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "测试:模拟错误的 VSA 名称。抛出异常" + +#~ msgid "Test: Emulate DB error. 
Raise" +#~ msgstr "测试:模拟数据测试。抛出" + +#~ msgid "Test: user_data = %s" +#~ msgstr "测试:user_data = %s" + +#~ msgid "_create: param=%s" +#~ msgstr "_create: param=%s" + +#~ msgid "Host %s" +#~ msgstr "主机 %s" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "测试:在主机 %(host)s 上提供 %(name)s" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "\t vol=%(vol)s" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "测试:VSA更新请求:vsa_id=%(vsa_id)s values=%(values)s" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "测试:卷创建:%s" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "测试:卷获取请求:id=%(volume_id)s" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "测试:卷更新请求:id=%(volume_id)s values=%(values)s" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "测试:卷获取:id=%(volume_id)s" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "任务 [%(name)s] %(task)s 状态:成功 %(result)s" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "任务 [%(name)s] %(task)s 状态:%(status)s %(error_info)s" + +#~ msgid "virsh said: %r" +#~ msgstr "virsh 输出: %r" + +#~ msgid "cool, it's a device" +#~ msgstr "酷,这是个设备" + +#~ msgid "Unable to read LXC console" +#~ msgstr "无法读取 LXC 控制台" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" +#~ "转换成 xml...\n" +#~ ":%s " + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." +#~ msgstr "nwfilter(%(instance_secgroup_filter_name)s) 未找到" + +#~ msgid "Created VM %s..." +#~ msgstr "创建虚拟机 %s ..." + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "已经将虚拟机 %(instance_name)s 创建成 %(vm_ref)s。" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "正在为虚拟机 %(vm_ref)s 创建一个特定的CDROM VBD, VDI %(vdi_ref)s ... " + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "已经创建了一个基于CDROM 的VBD %(vbd_ref)s,目的是为虚拟机 %(vm_ref)s,VDI %(vdi_ref)s。" + +#~ msgid "Image Type: %s" +#~ msgstr "镜像类型:%s" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "ISO:找到可能包含该ISO镜像的存储库" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "正在为VDI%s创建VBD " + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "为VDI %s 创建VBD完成。" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "VBD.unplug 操作第一次成功。" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "VBD.unplug 操作被拒绝:重试中..." + +#~ msgid "Not sleeping anymore!" +#~ msgstr "不再睡眠!" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "VBD.unplug 操作最终成功。" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "在 VBD.unplug 操作中忽略XenAPI.Failure:%s" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "忽略 XenAPI.Failure %s" + +#~ msgid "Starting instance %s" +#~ msgstr "启动实例 %s" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "实例 %s:生产失败" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "实例 %s 生产失败 - 正在进行清理" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "实例 %s:生产失败 - 无法创建虚拟机" + +#~ msgid "Starting VM %s..." +#~ msgstr "启动虚拟机 %s..." + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "生产 VM %(instance_uuid)s 它是由 %(vm_ref)s 创建的。" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "实例 %s:等待运行" + +#~ msgid "Instance %s: running" +#~ msgstr "实例 %s:运行" + +#~ msgid "Resources to remove:%s" +#~ msgstr "将要移除的资源:%s" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "移除VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "跳过 %s 的VDI的销毁" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "正在为实例 %(instance_uuid)s 销毁 VDIs" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "实例 %(instance_uuid)s 销毁了" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "正在为实例%(instance_uuid)s 销毁虚拟机" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "没有找到为迁移 %d 的实例,正在跳过" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "正在为虚拟机创建 vif(s):|%s|" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "创建VIF %(vif_ref)s 是为了虚拟机 %(vm_ref)s,网络 %(network_ref)s。" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "%(method)s 的调用返回错误:%(e)s。虚拟机id=%(instance_uuid)s; args=%(args)r" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "为虚拟机 %(vm_ref)s ,VDI %(vdi_ref)s 创建VBD... " + +#~ msgid "Error destroying VDI" +#~ msgstr "销毁VDI出错" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "\t卷 %s 不是VSA 卷" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "\tFE VSA 卷 %s 的创建 - 什么都不做" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "为 %s 的VSA BE create_volume失败" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "为 %s 的VSA BE create_volume成功" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "\tFE VSA 卷 %s 的删除 - 什么都不做" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "为 %s 的VSA BE delete_volume失败" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "为 %s 的VSA BE delete_volume成功" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "\tFE VSA 卷 %s 本地路径调用 - 调用discover" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "\tFE VSA Volume %s 确保导出 - 什么都不做" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "\tFE VSA Volume %s 创建导出 - 什么都不做" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "\tFE VSA Volume %s 删除导出 - 什么都不做" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "为 %s 的VSA BE remove_export失败" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "获取QoS信息失败" + +#~ msgid "drive_name not defined" +#~ msgstr "没有定义drive_name" + +#~ msgid "invalid drive type name %s" +#~ msgstr "无效的drive类型名称 %s" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "*** 用于实验的VSA代码 ***" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "请求的VCs (%d)数目过大。设置为默认值" + +#~ msgid "Creating VSA: %s" +#~ msgstr "正在创建VSA:%s" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s:创建卷 " +#~ "%(vol_name)s,%(vol_size)d GB,类型 %(vol_type_id)s" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "VSA ID %(vsa_id)d:更新VSA状态到 %(status)s" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "VSA ID %(vsa_id)d:更新VSA调用" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." 
+#~ msgstr "添加%(add_cnt)s VCs到 %(vsa_name)s。" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "%(del_cnt)s VCs 从VSA %(vsa_name)s 中删除。" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "VSA ID %(vsa_id)s:删除 %(direction)s 卷 %(vol_name)s" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "无法删除卷 %s" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "VSA ID %(vsa_id)s:强迫删除。%(direction)s 卷 %(vol_name)s" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "将要试图终止VSA ID %s" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "VSA ID %(vsa_id)s:删除实例 %(name)s" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "收到VSA %s 的Create call" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "没有找到VSA %(vsa_id)d" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "VSA ID %(vsa_id)s:Drive %(vol_id)s 创建。状态 %(status)s" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "Drive %(vol_name)s (%(vol_disp_name)s) 依然在创建阶段 - 请等待" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "VSA ID %(vsa_id)d: 不是所有的卷都创建了 (%(cvol_real)d of %(cvol_exp)d)" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" +#~ "VSA ID %(vsa_id)d:Drive %(vol_name)s " +#~ "(%(vol_disp_name)s)处于 %(status)s 状态。" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "更新卷 %(vol_name)s 的附加状态失败。%(ex)s" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "VSA ID %(vsa_id)d:删除所有BE卷" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "VSA ID %(vsa_id)d:启动 %(vc_count)d 个实例" + diff --git a/cinder/locale/zh_TW/LC_MESSAGES/nova.po b/cinder/locale/zh_TW/LC_MESSAGES/nova.po new file mode 100644 index 00000000000..48bc07cdc4d --- /dev/null +++ b/cinder/locale/zh_TW/LC_MESSAGES/nova.po @@ -0,0 +1,8207 @@ +# Chinese (Traditional) translation for cinder +# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011 +# This file is distributed under the same license as the cinder package. +# FIRST AUTHOR , 2011. +# +msgid "" +msgstr "" +"Project-Id-Version: cinder\n" +"Report-Msgid-Bugs-To: FULL NAME \n" +"POT-Creation-Date: 2012-04-08 23:04+0000\n" +"PO-Revision-Date: 2012-03-07 02:00+0000\n" +"Last-Translator: Charles Hsu \n" +"Language-Team: Chinese (Traditional) \n" +"Plural-Forms: nplurals=1; plural=0\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.6\n" + +#: cinder/context.py:59 +#, python-format +msgid "Arguments dropped when creating context: %s" +msgstr "" + +#: cinder/context.py:90 +#, python-format +msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" +msgstr "" + +#: cinder/crypto.py:48 +msgid "Filename of root CA" +msgstr "" + +#: cinder/crypto.py:51 +msgid "Filename of private key" +msgstr "Private key 私鑰檔案名稱" + +#: cinder/crypto.py:54 +msgid "Filename of root Certificate Revocation List" +msgstr "" + +#: cinder/crypto.py:57 +msgid "Where we keep our keys" +msgstr "我們的keys留存於何處" + +#: cinder/crypto.py:60 +msgid "Where we keep our root CA" +msgstr "我們的根憑證留存於何處" + +#: cinder/crypto.py:63 +msgid "Should we use a CA for each project?" 
+msgstr "是否需要一個project 使用一個CA" + +#: cinder/crypto.py:67 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: cinder/crypto.py:72 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: cinder/crypto.py:292 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: cinder/exception.py:56 +msgid "Unexpected error while running command." +msgstr "非預期的執行錯誤" + +#: cinder/exception.py:59 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"命令: %(cmd)s\n" +"退出代碼: %(exit_code)s\n" +"標準輸出: %(stdout)r\n" +"標準錯誤輸出: %(stderr)r" + +#: cinder/exception.py:94 +msgid "DB exception wrapped." +msgstr "" + +#: cinder/exception.py:155 +msgid "An unknown exception occurred." +msgstr "發生一個未知例外" + +#: cinder/exception.py:178 +msgid "Failed to decrypt text" +msgstr "內文解碼失敗" + +#: cinder/exception.py:182 +msgid "Failed to paginate through images from image service" +msgstr "" + +#: cinder/exception.py:186 +msgid "Virtual Interface creation failed" +msgstr "建立虛擬介面失敗" + +#: cinder/exception.py:190 +msgid "5 attempts to create virtual interfacewith unique mac address failed" +msgstr "連續嘗試 5 次建立唯一網路位置(MAC)的虛擬介面失敗" + +#: cinder/exception.py:195 +msgid "Connection to glance failed" +msgstr "連接到glance失敗" + +#: cinder/exception.py:199 +msgid "Connection to melange failed" +msgstr "" + +#: cinder/exception.py:203 +msgid "Not authorized." +msgstr "未被授權" + +#: cinder/exception.py:208 +msgid "User does not have admin privileges" +msgstr "使用者並沒有管理者權力" + +#: cinder/exception.py:212 +#, python-format +msgid "Policy doesn't allow %(action)s to be performed." +msgstr "" + +#: cinder/exception.py:216 +#, python-format +msgid "Not authorized for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:220 +msgid "Unacceptable parameters." +msgstr "不可接受的參數值" + +#: cinder/exception.py:225 +msgid "Invalid snapshot" +msgstr "無效的快照(snapshot)" + +#: cinder/exception.py:229 +#, python-format +msgid "Volume %(volume_id)s is not attached to anything" +msgstr "" + +#: cinder/exception.py:233 cinder/api/openstack/compute/contrib/keypairs.py:113 +msgid "Keypair data is invalid" +msgstr "無效的Keypair" + +#: cinder/exception.py:237 +msgid "Failed to load data into json format" +msgstr "" + +#: cinder/exception.py:241 +msgid "The request is invalid." +msgstr "" + +#: cinder/exception.py:245 +#, python-format +msgid "Invalid signature %(signature)s for user %(user)s." +msgstr "" + +#: cinder/exception.py:249 +msgid "Invalid input received" +msgstr "" + +#: cinder/exception.py:253 +#, python-format +msgid "Invalid instance type %(instance_type)s." +msgstr "" + +#: cinder/exception.py:257 +msgid "Invalid volume type" +msgstr "" + +#: cinder/exception.py:261 +msgid "Invalid volume" +msgstr "" + +#: cinder/exception.py:265 +#, python-format +msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s" +msgstr "" + +#: cinder/exception.py:269 +#, python-format +msgid "Invalid IP protocol %(protocol)s." +msgstr "" + +#: cinder/exception.py:273 +#, python-format +msgid "Invalid content type %(content_type)s." +msgstr "" + +#: cinder/exception.py:277 +#, python-format +msgid "Invalid cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:281 +msgid "Invalid reuse of an RPC connection." +msgstr "" + +#: cinder/exception.py:285 +msgid "Invalid Parameter: Unicode is not supported by the current database." 
+msgstr "" + +#: cinder/exception.py:292 +#, python-format +msgid "%(err)s" +msgstr "" + +#: cinder/exception.py:296 +#, python-format +msgid "" +"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:" +" %(reason)s." +msgstr "" + +#: cinder/exception.py:301 +#, python-format +msgid "Group not valid. Reason: %(reason)s" +msgstr "" + +#: cinder/exception.py:305 +#, python-format +msgid "" +"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while" +" the instance is in this state." +msgstr "" + +#: cinder/exception.py:310 +#, python-format +msgid "Instance %(instance_id)s is not running." +msgstr "" + +#: cinder/exception.py:314 +#, python-format +msgid "Instance %(instance_id)s is not suspended." +msgstr "" + +#: cinder/exception.py:318 +#, python-format +msgid "Instance %(instance_id)s is not in rescue mode" +msgstr "" + +#: cinder/exception.py:322 +msgid "Failed to suspend instance" +msgstr "" + +#: cinder/exception.py:326 +msgid "Failed to resume server" +msgstr "" + +#: cinder/exception.py:330 +msgid "Failed to reboot instance" +msgstr "" + +#: cinder/exception.py:334 +msgid "Failed to terminate instance" +msgstr "" + +#: cinder/exception.py:338 +msgid "Service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:342 +msgid "Volume service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:346 +msgid "Compute service is unavailable at this time." +msgstr "" + +#: cinder/exception.py:350 +#, python-format +msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)." +msgstr "" + +#: cinder/exception.py:355 +msgid "Destination compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:359 +msgid "Original compute host is unavailable at this time." +msgstr "" + +#: cinder/exception.py:363 +msgid "The supplied hypervisor type of is invalid." +msgstr "" + +#: cinder/exception.py:367 +msgid "The instance requires a newer hypervisor version than has been provided." +msgstr "" + +#: cinder/exception.py:372 +#, python-format +msgid "" +"The supplied disk path (%(path)s) already exists, it is expected not to " +"exist." +msgstr "" + +#: cinder/exception.py:377 +#, python-format +msgid "The supplied device path (%(path)s) is invalid." +msgstr "" + +#: cinder/exception.py:381 +#, python-format +msgid "The supplied device (%(device)s) is busy." +msgstr "" + +#: cinder/exception.py:385 +msgid "Unacceptable CPU info" +msgstr "" + +#: cinder/exception.py:389 +#, python-format +msgid "%(address)s is not a valid IP v4/6 address." +msgstr "" + +#: cinder/exception.py:393 +#, python-format +msgid "" +"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN " +"tag is %(tag)s, but the one associated with the port group is %(pgroup)s." +msgstr "" + +#: cinder/exception.py:399 +#, python-format +msgid "" +"vSwitch which contains the port group %(bridge)s is not associated with " +"the desired physical adapter. Expected vSwitch is %(expected)s, but the " +"one associated is %(actual)s." +msgstr "" + +#: cinder/exception.py:406 +#, python-format +msgid "Disk format %(disk_format)s is not acceptable" +msgstr "" + +#: cinder/exception.py:410 +#, python-format +msgid "Image %(image_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:414 +#, python-format +msgid "Instance %(instance_id)s is unacceptable: %(reason)s" +msgstr "" + +#: cinder/exception.py:418 +#, python-format +msgid "Ec2 id %(ec2_id)s is unacceptable." 
+msgstr "" + +#: cinder/exception.py:422 +msgid "Resource could not be found." +msgstr "" + +#: cinder/exception.py:427 +#, python-format +msgid "Required flag %(flag)s not set." +msgstr "" + +#: cinder/exception.py:431 +#, python-format +msgid "Volume %(volume_id)s could not be found." +msgstr "" + +#: cinder/exception.py:435 +#, python-format +msgid "Unable to locate account %(account_name)s on Solidfire device" +msgstr "" + +#: cinder/exception.py:440 +#, python-format +msgid "Volume not found for instance %(instance_id)s." +msgstr "" + +#: cinder/exception.py:444 +#, python-format +msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:449 +msgid "Zero volume types found." +msgstr "" + +#: cinder/exception.py:453 +#, python-format +msgid "Volume type %(volume_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:457 +#, python-format +msgid "Volume type with name %(volume_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:462 +#, python-format +msgid "" +"Volume Type %(volume_type_id)s has no extra specs with key " +"%(extra_specs_key)s." +msgstr "" + +#: cinder/exception.py:467 +#, python-format +msgid "Snapshot %(snapshot_id)s could not be found." +msgstr "" + +#: cinder/exception.py:471 +#, python-format +msgid "deleting volume %(volume_name)s that has snapshot" +msgstr "" + +#: cinder/exception.py:475 +#, python-format +msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" +msgstr "" + +#: cinder/exception.py:480 +#, python-format +msgid "No target id found for volume %(volume_id)s." +msgstr "" + +#: cinder/exception.py:484 +#, python-format +msgid "No disk at %(location)s" +msgstr "" + +#: cinder/exception.py:488 +#, python-format +msgid "Could not find a handler for %(driver_type)s volume." +msgstr "" + +#: cinder/exception.py:492 +#, python-format +msgid "Invalid image href %(image_href)s." +msgstr "" + +#: cinder/exception.py:496 +msgid "" +"Some images have been stored via hrefs. This version of the api does not " +"support displaying image hrefs." +msgstr "" + +#: cinder/exception.py:501 +#, python-format +msgid "Image %(image_id)s could not be found." +msgstr "" + +#: cinder/exception.py:505 +#, python-format +msgid "Kernel not found for image %(image_id)s." +msgstr "" + +#: cinder/exception.py:509 +#, python-format +msgid "User %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:513 +#, python-format +msgid "Project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:517 +#, python-format +msgid "User %(user_id)s is not a member of project %(project_id)s." +msgstr "" + +#: cinder/exception.py:521 +#, python-format +msgid "Role %(role_id)s could not be found." +msgstr "" + +#: cinder/exception.py:525 +msgid "Cannot find SR to read/write VDI." +msgstr "" + +#: cinder/exception.py:529 +#, python-format +msgid "%(req)s is required to create a network." +msgstr "" + +#: cinder/exception.py:533 +#, python-format +msgid "Network %(network_id)s could not be found." +msgstr "" + +#: cinder/exception.py:537 +#, python-format +msgid "Network could not be found for bridge %(bridge)s" +msgstr "" + +#: cinder/exception.py:541 +#, python-format +msgid "Network could not be found for uuid %(uuid)s" +msgstr "" + +#: cinder/exception.py:545 +#, python-format +msgid "Network could not be found with cidr %(cidr)s." +msgstr "" + +#: cinder/exception.py:549 +#, python-format +msgid "Network could not be found for instance %(instance_id)s." 
+msgstr "" + +#: cinder/exception.py:553 +msgid "No networks defined." +msgstr "" + +#: cinder/exception.py:557 +#, python-format +msgid "" +"Either Network uuid %(network_uuid)s is not present or is not assigned to" +" the project %(project_id)s." +msgstr "" + +#: cinder/exception.py:562 +#, python-format +msgid "Host is not set to the network (%(network_id)s)." +msgstr "" + +#: cinder/exception.py:566 +#, python-format +msgid "Network %(network)s has active ports, cannot delete." +msgstr "" + +#: cinder/exception.py:570 +msgid "Could not find the datastore reference(s) which the VM uses." +msgstr "" + +#: cinder/exception.py:574 +#, python-format +msgid "No fixed IP associated with id %(id)s." +msgstr "" + +#: cinder/exception.py:578 +#, python-format +msgid "Fixed ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:582 +#, python-format +msgid "Instance %(instance_id)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:586 +#, python-format +msgid "Network host %(host)s has zero fixed ips in network %(network_id)s." +msgstr "" + +#: cinder/exception.py:591 +#, python-format +msgid "Instance %(instance_id)s doesn't have fixed ip '%(ip)s'." +msgstr "" + +#: cinder/exception.py:595 +#, python-format +msgid "Host %(host)s has zero fixed ips." +msgstr "" + +#: cinder/exception.py:599 +#, python-format +msgid "" +"Fixed IP address (%(address)s) does not exist in network " +"(%(network_uuid)s)." +msgstr "" + +#: cinder/exception.py:604 +#, python-format +msgid "Fixed IP address %(address)s is already in use." +msgstr "" + +#: cinder/exception.py:608 +#, python-format +msgid "Fixed IP address %(address)s is invalid." +msgstr "" + +#: cinder/exception.py:612 +msgid "Zero fixed ips available." +msgstr "" + +#: cinder/exception.py:616 +msgid "Zero fixed ips could be found." +msgstr "" + +#: cinder/exception.py:620 +#, python-format +msgid "Floating ip not found for id %(id)s." +msgstr "" + +#: cinder/exception.py:624 +#, python-format +msgid "The DNS entry %(name)s already exists in domain %(domain)s." +msgstr "" + +#: cinder/exception.py:628 +#, python-format +msgid "Floating ip not found for address %(address)s." +msgstr "" + +#: cinder/exception.py:632 +#, python-format +msgid "Floating ip not found for host %(host)s." +msgstr "" + +#: cinder/exception.py:636 +msgid "Zero floating ips available." +msgstr "" + +#: cinder/exception.py:640 +#, python-format +msgid "Floating ip %(address)s is associated." +msgstr "" + +#: cinder/exception.py:644 +#, python-format +msgid "Floating ip %(address)s is not associated." +msgstr "" + +#: cinder/exception.py:648 +msgid "Zero floating ips exist." +msgstr "" + +#: cinder/exception.py:652 +#, python-format +msgid "Interface %(interface)s not found." +msgstr "" + +#: cinder/exception.py:656 +#, python-format +msgid "Keypair %(name)s not found for user %(user_id)s" +msgstr "" + +#: cinder/exception.py:660 +#, python-format +msgid "Certificate %(certificate_id)s not found." +msgstr "" + +#: cinder/exception.py:664 +#, python-format +msgid "Service %(service_id)s could not be found." +msgstr "" + +#: cinder/exception.py:668 +#, python-format +msgid "Host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:672 +#, python-format +msgid "Compute host %(host)s could not be found." +msgstr "" + +#: cinder/exception.py:676 +#, python-format +msgid "Could not find binary %(binary)s on host %(host)s." +msgstr "" + +#: cinder/exception.py:680 +#, python-format +msgid "Auth token %(token)s could not be found." 
+msgstr "" + +#: cinder/exception.py:684 +#, python-format +msgid "Access Key %(access_key)s could not be found." +msgstr "" + +#: cinder/exception.py:688 +msgid "Quota could not be found" +msgstr "" + +#: cinder/exception.py:692 +#, python-format +msgid "Quota for project %(project_id)s could not be found." +msgstr "" + +#: cinder/exception.py:696 +#, python-format +msgid "Quota class %(class_name)s could not be found." +msgstr "" + +#: cinder/exception.py:700 +#, python-format +msgid "Security group %(security_group_id)s not found." +msgstr "" + +#: cinder/exception.py:704 +#, python-format +msgid "Security group %(security_group_id)s not found for project %(project_id)s." +msgstr "" + +#: cinder/exception.py:709 +#, python-format +msgid "Security group with rule %(rule_id)s not found." +msgstr "" + +#: cinder/exception.py:713 +#, python-format +msgid "" +"Security group %(security_group_id)s is already associated with the " +"instance %(instance_id)s" +msgstr "" + +#: cinder/exception.py:718 +#, python-format +msgid "" +"Security group %(security_group_id)s is not associated with the instance " +"%(instance_id)s" +msgstr "" + +#: cinder/exception.py:723 +#, python-format +msgid "Migration %(migration_id)s could not be found." +msgstr "" + +#: cinder/exception.py:727 +#, python-format +msgid "Migration not found for instance %(instance_id)s with status %(status)s." +msgstr "" + +#: cinder/exception.py:732 +#, python-format +msgid "Console pool %(pool_id)s could not be found." +msgstr "" + +#: cinder/exception.py:736 +#, python-format +msgid "" +"Console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s not found." +msgstr "" + +#: cinder/exception.py:742 +#, python-format +msgid "Console %(console_id)s could not be found." +msgstr "" + +#: cinder/exception.py:746 +#, python-format +msgid "Console for instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:750 +#, python-format +msgid "" +"Console for instance %(instance_id)s in pool %(pool_id)s could not be " +"found." +msgstr "" + +#: cinder/exception.py:755 +#, python-format +msgid "Invalid console type %(console_type)s " +msgstr "" + +#: cinder/exception.py:759 +msgid "Zero instance types found." +msgstr "" + +#: cinder/exception.py:763 +#, python-format +msgid "Instance type %(instance_type_id)s could not be found." +msgstr "" + +#: cinder/exception.py:767 +#, python-format +msgid "Instance type with name %(instance_type_name)s could not be found." +msgstr "" + +#: cinder/exception.py:772 +#, python-format +msgid "Flavor %(flavor_id)s could not be found." +msgstr "" + +#: cinder/exception.py:776 +#, python-format +msgid "Cell %(cell_id)s could not be found." +msgstr "" + +#: cinder/exception.py:780 +#, python-format +msgid "Scheduler Host Filter %(filter_name)s could not be found." +msgstr "" + +#: cinder/exception.py:784 +#, python-format +msgid "Scheduler cost function %(cost_fn_str)s could not be found." +msgstr "" + +#: cinder/exception.py:789 +#, python-format +msgid "Scheduler weight flag not found: %(flag_name)s" +msgstr "" + +#: cinder/exception.py:793 +#, python-format +msgid "Instance %(instance_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:798 +#, python-format +msgid "" +"Instance Type %(instance_type_id)s has no extra specs with key " +"%(extra_specs_key)s." 
+msgstr "" + +#: cinder/exception.py:803 +msgid "LDAP object could not be found" +msgstr "" + +#: cinder/exception.py:807 +#, python-format +msgid "LDAP user %(user_id)s could not be found." +msgstr "" + +#: cinder/exception.py:811 +#, python-format +msgid "LDAP group %(group_id)s could not be found." +msgstr "" + +#: cinder/exception.py:815 +#, python-format +msgid "LDAP user %(user_id)s is not a member of group %(group_id)s." +msgstr "" + +#: cinder/exception.py:819 +#, python-format +msgid "File %(file_path)s could not be found." +msgstr "" + +#: cinder/exception.py:823 +msgid "Zero files could be found." +msgstr "" + +#: cinder/exception.py:827 +#, python-format +msgid "Virtual switch associated with the network adapter %(adapter)s not found." +msgstr "" + +#: cinder/exception.py:832 +#, python-format +msgid "Network adapter %(adapter)s could not be found." +msgstr "" + +#: cinder/exception.py:836 +#, python-format +msgid "Class %(class_name)s could not be found: %(exception)s" +msgstr "" + +#: cinder/exception.py:840 +msgid "Action not allowed." +msgstr "" + +#: cinder/exception.py:844 +#, python-format +msgid "Unable to use global role %(role_id)s" +msgstr "" + +#: cinder/exception.py:848 +msgid "Rotation is not allowed for snapshots" +msgstr "" + +#: cinder/exception.py:852 +msgid "Rotation param is required for backup image_type" +msgstr "" + +#: cinder/exception.py:861 +#, python-format +msgid "Key pair %(key_name)s already exists." +msgstr "" + +#: cinder/exception.py:865 +#, python-format +msgid "User %(user)s already exists." +msgstr "" + +#: cinder/exception.py:869 +#, python-format +msgid "LDAP user %(user)s already exists." +msgstr "" + +#: cinder/exception.py:873 +#, python-format +msgid "LDAP group %(group)s already exists." +msgstr "" + +#: cinder/exception.py:877 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" + +#: cinder/exception.py:882 +#, python-format +msgid "Project %(project)s already exists." +msgstr "" + +#: cinder/exception.py:886 +#, python-format +msgid "Instance %(name)s already exists." +msgstr "" + +#: cinder/exception.py:890 +#, python-format +msgid "Instance Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:894 +#, python-format +msgid "Volume Type %(name)s already exists." +msgstr "" + +#: cinder/exception.py:898 +#, python-format +msgid "%(path)s is on shared storage: %(reason)s" +msgstr "" + +#: cinder/exception.py:902 +msgid "Migration error" +msgstr "" + +#: cinder/exception.py:906 +#, python-format +msgid "Malformed message body: %(reason)s" +msgstr "" + +#: cinder/exception.py:910 +#, python-format +msgid "Could not find config at %(path)s" +msgstr "" + +#: cinder/exception.py:914 +#, python-format +msgid "Could not load paste app '%(name)s' from %(path)s" +msgstr "" + +#: cinder/exception.py:918 +msgid "When resizing, instances must change size!" +msgstr "" + +#: cinder/exception.py:922 +msgid "Image is larger than instance type allows" +msgstr "" + +#: cinder/exception.py:926 +msgid "1 or more Zones could not complete the request" +msgstr "" + +#: cinder/exception.py:930 +msgid "Instance type's memory is too small for requested image." +msgstr "" + +#: cinder/exception.py:934 +msgid "Instance type's disk is too small for requested image." +msgstr "" + +#: cinder/exception.py:938 +#, python-format +msgid "Insufficient free memory on compute node to start %(uuid)s." +msgstr "" + +#: cinder/exception.py:942 +msgid "Could not fetch bandwidth/cpu/disk metrics for this host." 
+msgstr "" + +#: cinder/exception.py:946 +#, python-format +msgid "No valid host was found. %(reason)s" +msgstr "" + +#: cinder/exception.py:950 +#, python-format +msgid "Host %(host)s is not up or doesn't exist." +msgstr "" + +#: cinder/exception.py:954 +msgid "Quota exceeded" +msgstr "" + +#: cinder/exception.py:958 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: " +"%(reason)s." +msgstr "" + +#: cinder/exception.py:963 +#, python-format +msgid "Aggregate %(aggregate_id)s could not be found." +msgstr "" + +#: cinder/exception.py:967 +#, python-format +msgid "Aggregate %(aggregate_name)s already exists." +msgstr "" + +#: cinder/exception.py:971 +#, python-format +msgid "Aggregate %(aggregate_id)s has no host %(host)s." +msgstr "" + +#: cinder/exception.py:975 +#, python-format +msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s." +msgstr "" + +#: cinder/exception.py:980 +#, python-format +msgid "Host %(host)s already member of another aggregate." +msgstr "" + +#: cinder/exception.py:984 +#, python-format +msgid "Aggregate %(aggregate_id)s already has host %(host)s." +msgstr "" + +#: cinder/exception.py:988 +#, python-format +msgid "Detected more than one volume with name %(vol_name)s" +msgstr "" + +#: cinder/exception.py:992 +#, python-format +msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" +msgstr "" + +#: cinder/exception.py:997 +msgid "Unable to create instance type" +msgstr "" + +#: cinder/exception.py:1001 +msgid "Bad response from SolidFire API" +msgstr "" + +#: cinder/exception.py:1005 +#, python-format +msgid "Error in SolidFire API response: status=%(status)s" +msgstr "" + +#: cinder/exception.py:1009 +#, python-format +msgid "Error in SolidFire API response: data=%(data)s" +msgstr "" + +#: cinder/exception.py:1013 +#, python-format +msgid "Detected existing vlan with id %(vlan)d" +msgstr "" + +#: cinder/exception.py:1017 +#, python-format +msgid "Instance %(instance_id)s could not be found." +msgstr "" + +#: cinder/exception.py:1021 +#, python-format +msgid "Invalid id: %(val)s (expecting \"i-...\")." +msgstr "" + +#: cinder/exception.py:1025 +#, python-format +msgid "Could not fetch image %(image)s" +msgstr "" + +#: cinder/log.py:315 +#, python-format +msgid "syslog facility must be one of: %s" +msgstr "" + +#: cinder/manager.py:146 +#, python-format +msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run" +msgstr "" + +#: cinder/manager.py:152 +#, python-format +msgid "Running periodic task %(full_task_name)s" +msgstr "" + +#: cinder/manager.py:159 +#, python-format +msgid "Error during %(full_task_name)s: %(e)s" +msgstr "" + +#: cinder/manager.py:203 +msgid "Notifying Schedulers of capabilities ..." +msgstr "" + +#: cinder/policy.py:30 +msgid "JSON file representing policy" +msgstr "" + +#: cinder/policy.py:33 +msgid "Rule checked when requested rule is not found" +msgstr "" + +#: cinder/service.py:137 +msgid "SIGTERM received" +msgstr "" + +#: cinder/service.py:177 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: cinder/service.py:195 +#, python-format +msgid "Creating Consumer connection for Service %s" +msgstr "" + +#: cinder/service.py:282 +msgid "Service killed that has no database entry" +msgstr "" + +#: cinder/service.py:319 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: cinder/service.py:334 +msgid "Recovered model server connection!" 
+msgstr "" + +#: cinder/service.py:340 +msgid "model server went away" +msgstr "" + +#: cinder/service.py:433 +msgid "Full set of FLAGS:" +msgstr "" + +#: cinder/service.py:440 +#, python-format +msgid "%(flag)s : FLAG SET " +msgstr "" + +#: cinder/utils.py:79 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: cinder/utils.py:165 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: cinder/utils.py:210 +#, python-format +msgid "Got unknown keyword args to utils.execute: %r" +msgstr "" + +#: cinder/utils.py:220 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: cinder/utils.py:236 cinder/utils.py:315 +#, python-format +msgid "Result was %s" +msgstr "" + +#: cinder/utils.py:249 +#, python-format +msgid "%r failed. Retrying." +msgstr "" + +#: cinder/utils.py:291 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: cinder/utils.py:293 +msgid "Environment not supported over SSH" +msgstr "" + +#: cinder/utils.py:297 +msgid "process_input not supported over SSH" +msgstr "" + +#: cinder/utils.py:352 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: cinder/utils.py:534 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" + +#: cinder/utils.py:537 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: cinder/utils.py:648 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: cinder/utils.py:659 +#, python-format +msgid "backend %s" +msgstr "" + +#: cinder/utils.py:709 +msgid "in looping call" +msgstr "" + +#: cinder/utils.py:927 +#, python-format +msgid "Attempting to grab semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:931 +#, python-format +msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:935 +#, python-format +msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..." +msgstr "" + +#: cinder/utils.py:942 +#, python-format +msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"..." 
+msgstr "" + +#: cinder/utils.py:1001 +#, python-format +msgid "Found sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1008 +#, python-format +msgid "Cleaned sentinel %(filename)s for pid %(pid)s" +msgstr "" + +#: cinder/utils.py:1023 +#, python-format +msgid "Found lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1028 +#, python-format +msgid "Cleaned lockfile %(file)s with link count %(count)d" +msgstr "" + +#: cinder/utils.py:1138 +#, python-format +msgid "Expected object of type: %s" +msgstr "" + +#: cinder/utils.py:1169 +#, python-format +msgid "Invalid server_string: %s" +msgstr "" + +#: cinder/utils.py:1298 +#, python-format +msgid "timefunc: '%(name)s' took %(total_time).2f secs" +msgstr "" + +#: cinder/utils.py:1330 +msgid "Original exception being dropped" +msgstr "" + +#: cinder/utils.py:1461 +#, python-format +msgid "Class %(fullname)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1463 +#, python-format +msgid "Class %(fullname)s is deprecated" +msgstr "" + +#: cinder/utils.py:1495 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated: %(msg)s" +msgstr "" + +#: cinder/utils.py:1497 +#, python-format +msgid "Function %(name)s in %(location)s is deprecated" +msgstr "" + +#: cinder/utils.py:1681 +#, python-format +msgid "Could not remove tmpdir: %s" +msgstr "" + +#: cinder/wsgi.py:97 +#, python-format +msgid "Started %(name)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:108 +msgid "Stopping WSGI server." +msgstr "" + +#: cinder/wsgi.py:111 +msgid "Stopping raw TCP server." +msgstr "" + +#: cinder/wsgi.py:117 +#, python-format +msgid "Starting TCP server %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: cinder/wsgi.py:133 +msgid "WSGI server has stopped." +msgstr "" + +#: cinder/wsgi.py:211 +msgid "You must implement __call__" +msgstr "" + +#: cinder/api/direct.py:218 +msgid "not available" +msgstr "" + +#: cinder/api/direct.py:299 +#, python-format +msgid "Returned non-serializeable type: %s" +msgstr "" + +#: cinder/api/sizelimit.py:51 +msgid "Request is too large." +msgstr "" + +#: cinder/api/validator.py:142 +#, python-format +msgid "%(key)s with value %(value)s failed validator %(validator)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:73 +#, python-format +msgid "%(code)s: %(message)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:95 +#, python-format +msgid "FaultWrapper: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:170 +msgid "Too many failed authentications." +msgstr "" + +#: cinder/api/ec2/__init__.py:180 +#, python-format +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and" +" will be locked out for %(lock_mins)d minutes." 
+msgstr "" + +#: cinder/api/ec2/__init__.py:267 +msgid "Signature not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:271 +msgid "Access key not provided" +msgstr "" + +#: cinder/api/ec2/__init__.py:306 cinder/api/ec2/__init__.py:319 +msgid "Failure communicating with keystone" +msgstr "" + +#: cinder/api/ec2/__init__.py:388 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:404 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" + +#: cinder/api/ec2/__init__.py:435 +#, python-format +msgid "action: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:437 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:512 +#, python-format +msgid "Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" + +#: cinder/api/ec2/__init__.py:584 +#, python-format +msgid "InstanceNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:590 +#, python-format +msgid "VolumeNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:596 +#, python-format +msgid "SnapshotNotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:602 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:605 +#, python-format +msgid "EC2APIError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:613 +#, python-format +msgid "KeyPairExists raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:617 +#, python-format +msgid "InvalidParameterValue raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:621 +#, python-format +msgid "InvalidPortRange raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:625 +#, python-format +msgid "NotAuthorized raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:629 +#, python-format +msgid "InvalidRequest raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:633 +#, python-format +msgid "QuotaError raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:637 +#, python-format +msgid "Invalid id: bogus (expecting \"i-...\"): %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:646 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:647 +#, python-format +msgid "Environment: %s" +msgstr "" + +#: cinder/api/ec2/__init__.py:649 cinder/api/metadata/handler.py:248 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: cinder/api/ec2/apirequest.py:64 +#, python-format +msgid "Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:336 +#, python-format +msgid "Create snapshot of volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:372 +#, python-format +msgid "" +"Value (%s) for KeyName is invalid. Content limited to Alphanumeric " +"character, spaces, dashes, and underscore." +msgstr "" + +#: cinder/api/ec2/cloud.py:378 +#, python-format +msgid "Value (%s) for Keyname is invalid. Length exceeds maximum of 255." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:382 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:391 +#, python-format +msgid "Import key %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:409 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:551 +msgid "Invalid CIDR" +msgstr "" + +#: cinder/api/ec2/cloud.py:639 cinder/api/ec2/cloud.py:693 +#: cinder/api/ec2/cloud.py:800 +msgid "Not enough parameters, need group_name or group_id" +msgstr "" + +#: cinder/api/ec2/cloud.py:654 +#: cinder/api/openstack/compute/contrib/security_groups.py:517 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:666 cinder/api/ec2/cloud.py:719 +#, python-format +msgid "%s Not enough parameters to build a valid rule" +msgstr "" + +#: cinder/api/ec2/cloud.py:684 cinder/api/ec2/cloud.py:744 +msgid "No rule for the specified parameters." +msgstr "" + +#: cinder/api/ec2/cloud.py:708 +#: cinder/api/openstack/compute/contrib/security_groups.py:354 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:725 +#, python-format +msgid "%s - This rule already exists in group" +msgstr "" + +#: cinder/api/ec2/cloud.py:769 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Content limited to " +"Alphanumeric characters, spaces, dashes, and underscores." +msgstr "" + +#: cinder/api/ec2/cloud.py:776 +#, python-format +msgid "" +"Value (%s) for parameter GroupName is invalid. Length exceeds maximum of " +"255." +msgstr "" + +#: cinder/api/ec2/cloud.py:780 +#: cinder/api/openstack/compute/contrib/security_groups.py:292 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:783 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: cinder/api/ec2/cloud.py:815 +#: cinder/api/openstack/compute/contrib/security_groups.py:245 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:823 cinder/compute/manager.py:1630 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:894 +#, python-format +msgid "Create volume from snapshot %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:898 cinder/api/openstack/compute/contrib/volumes.py:186 +#: cinder/api/openstack/volume/volumes.py:222 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: cinder/api/ec2/cloud.py:921 +msgid "Delete Failed" +msgstr "" + +#: cinder/api/ec2/cloud.py:931 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:939 +msgid "Attach Failed." +msgstr "" + +#: cinder/api/ec2/cloud.py:952 cinder/api/openstack/compute/contrib/volumes.py:366 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:959 +msgid "Detach Volume Failed." 
+msgstr "" + +#: cinder/api/ec2/cloud.py:984 cinder/api/ec2/cloud.py:1041 +#: cinder/api/ec2/cloud.py:1518 cinder/api/ec2/cloud.py:1533 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1107 +#, python-format +msgid "vol = %s\n" +msgstr "" + +#: cinder/api/ec2/cloud.py:1255 +msgid "Allocate address" +msgstr "" + +#: cinder/api/ec2/cloud.py:1267 +#, python-format +msgid "Release address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1272 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1282 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1308 +msgid "Image must be available" +msgstr "" + +#: cinder/api/ec2/cloud.py:1329 +msgid "Going to start terminating instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1343 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: cinder/api/ec2/cloud.py:1354 +msgid "Going to stop instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1365 +msgid "Going to start instances" +msgstr "" + +#: cinder/api/ec2/cloud.py:1455 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1471 +msgid "imageLocation is required" +msgstr "" + +#: cinder/api/ec2/cloud.py:1490 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1536 +msgid "user or group not specified" +msgstr "" + +#: cinder/api/ec2/cloud.py:1538 +msgid "only group \"all\" is supported" +msgstr "" + +#: cinder/api/ec2/cloud.py:1540 +msgid "operation_type must be add or remove" +msgstr "" + +#: cinder/api/ec2/cloud.py:1542 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: cinder/api/ec2/cloud.py:1555 +#, python-format +msgid "Not allowed to modify attributes for image %s" +msgstr "" + +#: cinder/api/ec2/cloud.py:1603 +#, python-format +msgid "Couldn't stop instance with in %d sec" +msgstr "" + +#: cinder/api/metadata/handler.py:246 cinder/api/metadata/handler.py:253 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:43 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:45 cinder/api/openstack/wsgi.py:886 +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "" + +#: cinder/api/openstack/__init__.py:94 +msgid "Must specify an ExtensionManager class" +msgstr "" + +#: cinder/api/openstack/__init__.py:105 +#, python-format +msgid "Extended resource: %s" +msgstr "" + +#: cinder/api/openstack/__init__.py:130 +#, python-format +msgid "" +"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such " +"resource" +msgstr "" + +#: cinder/api/openstack/__init__.py:135 +#, python-format +msgid "Extension %(ext_name)s extending resource: %(collection)s" +msgstr "" + +#: cinder/api/openstack/auth.py:90 +#, python-format +msgid "%(user_id)s could not be found with token '%(token)s'" +msgstr "" + +#: cinder/api/openstack/auth.py:134 +#, python-format +msgid "%(user_id)s must be an admin or a member of %(project_id)s" +msgstr "" + +#: cinder/api/openstack/auth.py:152 +msgid "Authentication requests must be made against a version root (e.g. /v2)." +msgstr "" + +#: cinder/api/openstack/auth.py:167 +#, python-format +msgid "Could not find %s in request." 
+msgstr "" + +#: cinder/api/openstack/auth.py:191 +#, python-format +msgid "Successfully authenticated '%s'" +msgstr "" + +#: cinder/api/openstack/auth.py:241 +msgid "User not found with provided API key." +msgstr "" + +#: cinder/api/openstack/auth.py:258 +#, python-format +msgid "Provided API key is valid, but not for user '%(username)s'" +msgstr "" + +#: cinder/api/openstack/common.py:133 cinder/api/openstack/common.py:167 +msgid "limit param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:136 cinder/api/openstack/common.py:171 +msgid "limit param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:161 +msgid "offset param must be an integer" +msgstr "" + +#: cinder/api/openstack/common.py:175 +msgid "offset param must be positive" +msgstr "" + +#: cinder/api/openstack/common.py:203 +#, python-format +msgid "marker [%s] not found" +msgstr "" + +#: cinder/api/openstack/common.py:243 +#, python-format +msgid "href %s does not contain version" +msgstr "" + +#: cinder/api/openstack/common.py:278 +msgid "Image metadata limit exceeded" +msgstr "" + +#: cinder/api/openstack/common.py:295 +#, python-format +msgid "Converting nw_info: %s" +msgstr "" + +#: cinder/api/openstack/common.py:305 +#, python-format +msgid "Converted networks: %s" +msgstr "" + +#: cinder/api/openstack/common.py:338 +#, python-format +msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s" +msgstr "" + +#: cinder/api/openstack/common.py:341 +#, python-format +msgid "Instance is in an invalid state for '%(action)s'" +msgstr "" + +#: cinder/api/openstack/common.py:421 +msgid "Rejecting snapshot request, snapshots currently disabled" +msgstr "" + +#: cinder/api/openstack/common.py:423 +msgid "Instance snapshots are not permitted at this time." 
+msgstr "" + +#: cinder/api/openstack/extensions.py:188 +#, python-format +msgid "Loaded extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:225 +#, python-format +msgid "Ext name: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:226 +#, python-format +msgid "Ext alias: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:227 +#, python-format +msgid "Ext description: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:229 +#, python-format +msgid "Ext namespace: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:230 +#, python-format +msgid "Ext updated: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:232 +#, python-format +msgid "Exception loading extension: %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:246 +#, python-format +msgid "Loading extension %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:252 +#, python-format +msgid "Calling extension factory %s" +msgstr "" + +#: cinder/api/openstack/extensions.py:264 +#, python-format +msgid "Failed to load extension %(ext_factory)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:344 +#, python-format +msgid "Failed to load extension %(classpath)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/extensions.py:368 +#, python-format +msgid "Failed to load extension %(ext_name)s: %(exc)s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:135 cinder/api/openstack/wsgi.py:538 +msgid "cannot understand JSON" +msgstr "" + +#: cinder/api/openstack/wsgi.py:159 +#: cinder/api/openstack/compute/contrib/hosts.py:86 +msgid "cannot understand XML" +msgstr "" + +#: cinder/api/openstack/wsgi.py:543 +msgid "too many body keys" +msgstr "" + +#: cinder/api/openstack/wsgi.py:582 +#, python-format +msgid "Exception handling resource: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:586 +#, python-format +msgid "Fault thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:589 +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:697 +msgid "Unrecognized Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:701 +msgid "No Content-Type provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:705 +msgid "Empty body provided in request" +msgstr "" + +#: cinder/api/openstack/wsgi.py:816 +#, python-format +msgid "There is no such action: %s" +msgstr "" + +#: cinder/api/openstack/wsgi.py:819 cinder/api/openstack/wsgi.py:832 +#: cinder/api/openstack/compute/server_metadata.py:58 +#: cinder/api/openstack/compute/server_metadata.py:76 +#: cinder/api/openstack/compute/server_metadata.py:101 +#: cinder/api/openstack/compute/server_metadata.py:126 +#: cinder/api/openstack/compute/contrib/admin_actions.py:211 +#: cinder/api/openstack/compute/contrib/console_output.py:52 +msgid "Malformed request body" +msgstr "" + +#: cinder/api/openstack/wsgi.py:829 +msgid "Unsupported Content-Type" +msgstr "" + +#: cinder/api/openstack/wsgi.py:841 +msgid "Malformed request url" +msgstr "" + +#: cinder/api/openstack/wsgi.py:889 +#, python-format +msgid "%(url)s returned a fault: %(e)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:265 +msgid "element is not a child" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:414 +msgid "root element selecting a list" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:739 +#, python-format +msgid "Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s" +msgstr "" + +#: cinder/api/openstack/xmlutil.py:858 +msgid "subclasses must implement construct()!" 
+msgstr "" + +#: cinder/api/openstack/compute/extensions.py:29 +#: cinder/api/openstack/volume/extensions.py:29 +msgid "Initializing extension manager." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:40 +#: cinder/api/openstack/compute/images.py:146 +#: cinder/api/openstack/compute/images.py:161 +msgid "Image not found." +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:79 +msgid "Incorrect request body format" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:83 +#: cinder/api/openstack/compute/server_metadata.py:80 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:79 +#: cinder/api/openstack/compute/contrib/volumetypes.py:188 +msgid "Request body and URI mismatch" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:86 +#: cinder/api/openstack/compute/server_metadata.py:84 +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:82 +#: cinder/api/openstack/compute/contrib/volumetypes.py:191 +msgid "Request body contains too many items" +msgstr "" + +#: cinder/api/openstack/compute/image_metadata.py:111 +msgid "Invalid metadata key" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:74 +msgid "Instance does not exist" +msgstr "" + +#: cinder/api/openstack/compute/ips.py:97 +msgid "Instance is not a member of specified network" +msgstr "" + +#: cinder/api/openstack/compute/limits.py:140 +#, python-format +msgid "" +"Only %(value)s %(verb)s request(s) can be made to %(uri)s every " +"%(unit_string)s." +msgstr "" + +#: cinder/api/openstack/compute/limits.py:266 +msgid "This request was rate-limited." +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:38 +#: cinder/api/openstack/compute/server_metadata.py:122 +#: cinder/api/openstack/compute/server_metadata.py:159 +msgid "Server does not exist" +msgstr "" + +#: cinder/api/openstack/compute/server_metadata.py:141 +#: cinder/api/openstack/compute/server_metadata.py:152 +msgid "Metadata item was not found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:425 +#, python-format +msgid "Invalid server status: %(status)s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:433 +msgid "Invalid changes-since value" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:483 +msgid "Personality file limit exceeded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:485 +msgid "Personality file path too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:487 +msgid "Personality file content too long" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:501 +msgid "Server name is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:505 +msgid "Server name is an empty string" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:509 +msgid "Server name must be less than 256 characters." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:527 +#, python-format +msgid "Bad personality format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:530 +msgid "Bad personality format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:535 +#, python-format +msgid "Personality content for %s cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:550 +#, python-format +msgid "Bad networks format: network uuid is not in proper format (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:559 +#, python-format +msgid "Invalid fixed IP address (%s)" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:566 +#, python-format +msgid "Duplicate networks (%s) are not allowed" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:572 +#, python-format +msgid "Bad network format: missing %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:575 +msgid "Bad networks format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:587 +msgid "Userdata content cannot be decoded" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:594 +msgid "accessIPv4 is not proper IPv4 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:601 +msgid "accessIPv6 is not proper IPv6 format" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:633 +msgid "Server name is not defined" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:675 +#: cinder/api/openstack/compute/servers.py:740 +msgid "Invalid flavorRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:737 +msgid "Can not find requested image" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:743 +msgid "Invalid key_name provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:829 +#: cinder/api/openstack/compute/servers.py:849 +msgid "Instance has not been resized." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:835 +#, python-format +msgid "Error in confirm-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:855 +#, python-format +msgid "Error in revert-resize %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:868 +msgid "Argument 'type' for reboot is not HARD or SOFT" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:872 +msgid "Missing argument 'type' for reboot" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:885 +#, python-format +msgid "Error in reboot %s" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:897 +msgid "Unable to locate requested flavor." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:900 +msgid "Resize requires a change in size." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:924 +msgid "Malformed server entity" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:931 +msgid "Missing imageRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:940 +msgid "Invalid imageRef provided." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:949 +msgid "Missing flavorRef attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:962 +msgid "No adminPass was specified" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:966 +#: cinder/api/openstack/compute/servers.py:1144 +msgid "Invalid adminPass" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:980 +msgid "Unable to parse metadata key/value pairs." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:993 +msgid "Resize request has invalid 'flavorRef' attribute." 
+msgstr "" + +#: cinder/api/openstack/compute/servers.py:996 +msgid "Resize requests require 'flavorRef' attribute." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1014 +#: cinder/api/openstack/compute/contrib/aggregates.py:142 +#: cinder/api/openstack/compute/contrib/networks.py:65 +msgid "Invalid request body" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1019 +msgid "Could not parse imageRef from request." +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1071 +msgid "Instance could not be found" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1074 +msgid "Cannot find image for rebuild" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1103 +msgid "createImage entity requires name attribute" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1112 +#: cinder/api/openstack/compute/contrib/admin_actions.py:238 +msgid "Invalid metadata" +msgstr "" + +#: cinder/api/openstack/compute/servers.py:1167 +#, python-format +msgid "Removing options '%(unk_opt_str)s' from query" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:60 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:77 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:94 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::暫停 %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:111 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::繼續 %s" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:127 +#, python-format +msgid "Error in migrate %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:141 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:154 +#: cinder/api/openstack/compute/contrib/admin_actions.py:170 +#: cinder/api/openstack/compute/contrib/admin_actions.py:186 +#: cinder/api/openstack/compute/contrib/multinic.py:41 +#: cinder/api/openstack/compute/contrib/rescue.py:44 +msgid "Server not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:157 +#, python-format +msgid "Compute.api::inject_network_info %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:173 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:189 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:219 +#, python-format +msgid "createBackup entity requires %s attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:223 +msgid "Malformed createBackup entity" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:229 +msgid "createBackup attribute 'rotation' must be an integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:244 +#: cinder/api/openstack/compute/contrib/console_output.py:47 +#: cinder/api/openstack/compute/contrib/console_output.py:59 +#: cinder/api/openstack/compute/contrib/consoles.py:49 +#: cinder/api/openstack/compute/contrib/consoles.py:60 +#: cinder/api/openstack/compute/contrib/server_action_list.py:49 +#: cinder/api/openstack/compute/contrib/server_diagnostics.py:47 +#: cinder/api/openstack/compute/contrib/server_start_stop.py:38 +msgid "Instance not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:272 +msgid 
"host and block_migration must be specified." +msgstr "" + +#: cinder/api/openstack/compute/contrib/admin_actions.py:284 +#, python-format +msgid "Live migration of instance %(id)s to host %(host)s failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:76 +#, python-format +msgid "" +"Cannot create aggregate with name %(name)s and availability zone " +"%(avail_zone)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:88 +#, python-format +msgid "Cannot show aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:114 +#, python-format +msgid "Cannot update aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:126 +#, python-format +msgid "Cannot delete aggregate: %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:139 +#, python-format +msgid "Aggregates does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:152 +#: cinder/api/openstack/compute/contrib/aggregates.py:158 +#, python-format +msgid "Cannot add host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:171 +#: cinder/api/openstack/compute/contrib/aggregates.py:175 +#, python-format +msgid "Cannot remove host %(host)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/aggregates.py:195 +#, python-format +msgid "Cannot set metadata %(metadata)s in aggregate %(id)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/certificates.py:75 +msgid "Only root certificate can be retrieved." +msgstr "" + +#: cinder/api/openstack/compute/contrib/cloudpipe.py:146 +msgid "" +"Unable to claim IP for VPN instances, ensure it isn't running, and try " +"again in a few minutes" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:44 +msgid "Missing type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/consoles.py:56 +msgid "Invalid type specification" +msgstr "" + +#: cinder/api/openstack/compute/contrib/disk_config.py:44 +#, python-format +msgid "%s must be either 'MANUAL' or 'AUTO'." +msgstr "" + +#: cinder/api/openstack/compute/contrib/extended_server_attributes.py:77 +#: cinder/api/openstack/compute/contrib/extended_status.py:61 +msgid "Server not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextradata.py:61 +#: cinder/api/openstack/compute/contrib/flavorextradata.py:91 +msgid "Flavor not found." +msgstr "" + +#: cinder/api/openstack/compute/contrib/flavorextraspecs.py:49 +#: cinder/api/openstack/compute/contrib/volumetypes.py:158 +msgid "No Request Body" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:159 +#, python-format +msgid "No more floating ips in pool %s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:161 +msgid "No more floating ips available." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:201 +#: cinder/api/openstack/compute/contrib/floating_ips.py:230 +#: cinder/api/openstack/compute/contrib/security_groups.py:571 +#: cinder/api/openstack/compute/contrib/security_groups.py:604 +msgid "Missing parameter dict" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:204 +#: cinder/api/openstack/compute/contrib/floating_ips.py:233 +msgid "Address not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:213 +msgid "No fixed ips associated to instance" +msgstr "" + +#: cinder/api/openstack/compute/contrib/floating_ips.py:216 +msgid "Associate floating ip failed" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:144 +#, python-format +msgid "Invalid status: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:148 +#, python-format +msgid "Invalid mode: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:152 +#, python-format +msgid "Invalid update setting: '%s'" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:170 +#, python-format +msgid "Putting host %(host)s in maintenance mode %(mode)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:181 +#, python-format +msgid "Setting host %(host)s to %(state)s." +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:230 +msgid "Describe-resource is admin only functionality" +msgstr "" + +#: cinder/api/openstack/compute/contrib/hosts.py:238 +msgid "Host not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:70 +msgid "Keypair name contains unsafe characters" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:95 +msgid "Keypair name must be between 1 and 255 characters long" +msgstr "" + +#: cinder/api/openstack/compute/contrib/keypairs.py:100 +#, python-format +msgid "Key pair '%s' already exists." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:52 +msgid "Missing 'networkId' argument for addFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:68 +msgid "Missing 'address' argument for removeFixedIp" +msgstr "" + +#: cinder/api/openstack/compute/contrib/multinic.py:77 +#, python-format +msgid "Unable to find address %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:62 +#, python-format +msgid "Network does not have %s action" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:70 +#, python-format +msgid "Disassociating network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:74 +#: cinder/api/openstack/compute/contrib/networks.py:91 +#: cinder/api/openstack/compute/contrib/networks.py:101 +msgid "Network not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:87 +#, python-format +msgid "Showing network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/networks.py:97 +#, python-format +msgid "Deleting network with id %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/scheduler_hints.py:41 +msgid "Malformed scheduler_hints attribute" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:222 +msgid "Security group id should be integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:243 +msgid "Security group is still in use" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:295 +#, python-format +msgid "Security group %s already exists" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:315 +#, python-format +msgid "Security group %s is not a string or unicode" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:318 +#, python-format +msgid "Security group %s cannot be empty." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:321 +#, python-format +msgid "Security group %s should not be greater than 255 characters." +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:348 +msgid "Parent group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:351 +#, python-format +msgid "Security group (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:369 +msgid "Not enough parameters to build a valid rule." 
+msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:376 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:414 +msgid "Parent or group id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:507 +msgid "Rule id is not integer" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:510 +#, python-format +msgid "Rule (%s) not found" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:574 +#: cinder/api/openstack/compute/contrib/security_groups.py:607 +msgid "Security group not specified" +msgstr "" + +#: cinder/api/openstack/compute/contrib/security_groups.py:578 +#: cinder/api/openstack/compute/contrib/security_groups.py:611 +msgid "Security group name cannot be empty" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:45 +#, python-format +msgid "start instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/server_start_stop.py:54 +#, python-format +msgid "stop instance %r" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:73 +#: cinder/api/openstack/volume/volumes.py:106 +#, python-format +msgid "vol=%s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:146 +#: cinder/api/openstack/volume/volumes.py:184 +#, python-format +msgid "Delete volume with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:329 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:481 +#: cinder/api/openstack/volume/snapshots.py:110 +#, python-format +msgid "Delete snapshot with id: %s" +msgstr "" + +#: cinder/api/openstack/compute/contrib/volumes.py:524 +#: cinder/api/openstack/volume/snapshots.py:150 +#, python-format +msgid "Create snapshot from volume %s" +msgstr "" + +#: cinder/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: cinder/auth/ldapdriver.py:650 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: cinder/auth/manager.py:298 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: cinder/auth/manager.py:302 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: cinder/auth/manager.py:308 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: cinder/auth/manager.py:315 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" + +#: cinder/auth/manager.py:324 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" + +#: cinder/auth/manager.py:331 cinder/auth/manager.py:343 +#, python-format +msgid "user.secret: %s" +msgstr "" + +#: cinder/auth/manager.py:332 cinder/auth/manager.py:344 +#, python-format +msgid "expected_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:333 cinder/auth/manager.py:345 +#, python-format +msgid "signature: %s" +msgstr "" + +#: cinder/auth/manager.py:335 cinder/auth/manager.py:357 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: cinder/auth/manager.py:353 +#, python-format +msgid "host_only_signature: %s" +msgstr "" + +#: cinder/auth/manager.py:449 +msgid "Must specify project" +msgstr "" + +#: cinder/auth/manager.py:490 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:493 +#, python-format +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:519 +#, python-format +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:522 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:595 +#, python-format +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" + +#: cinder/auth/manager.py:613 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: cinder/auth/manager.py:625 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:646 +#, python-format +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" + +#: cinder/auth/manager.py:676 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: cinder/auth/manager.py:734 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" + +#: cinder/auth/manager.py:743 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: cinder/auth/manager.py:753 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:755 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: cinder/auth/manager.py:757 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" + +#: cinder/auth/manager.py:802 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:46 +msgid "Instance type for vpn instances" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:49 +msgid "Template for cloudpipe instance boot script" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:52 +msgid "Network to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:55 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: cinder/cloudpipe/pipelib.py:107 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: cinder/compute/api.py:141 +msgid "No compute host specified" +msgstr "" + +#: cinder/compute/api.py:144 +#, python-format +msgid "Unable to find host for Instance 
%s" +msgstr "" + +#: cinder/compute/api.py:192 +#, python-format +msgid "" +"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata " +"properties" +msgstr "" + +#: cinder/compute/api.py:203 +#, python-format +msgid "Quota exceeded for %(pid)s, metadata property key or value too long" +msgstr "" + +#: cinder/compute/api.py:257 +msgid "Cannot run any more instances of this type." +msgstr "" + +#: cinder/compute/api.py:259 +#, python-format +msgid "Can only run %s more instances of this type." +msgstr "" + +#: cinder/compute/api.py:261 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances. " +msgstr "" + +#: cinder/compute/api.py:310 +msgid "Creating a raw instance" +msgstr "" + +#: cinder/compute/api.py:312 +#, python-format +msgid "Using Kernel=%(kernel_id)s, Ramdisk=%(ramdisk_id)s" +msgstr "" + +#: cinder/compute/api.py:383 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: cinder/compute/api.py:447 +#, python-format +msgid "bdm %s" +msgstr "" + +#: cinder/compute/api.py:474 +#, python-format +msgid "block_device_mapping %s" +msgstr "" + +#: cinder/compute/api.py:591 +#, python-format +msgid "Sending create to scheduler for %(pid)s/%(uid)s's" +msgstr "" + +#: cinder/compute/api.py:871 +msgid "Going to try to soft delete instance" +msgstr "" + +#: cinder/compute/api.py:891 +msgid "No host for instance, deleting immediately" +msgstr "" + +#: cinder/compute/api.py:939 +msgid "Going to try to terminate instance" +msgstr "" + +#: cinder/compute/api.py:977 +msgid "Going to try to stop instance" +msgstr "" + +#: cinder/compute/api.py:996 +msgid "Going to try to start instance" +msgstr "" + +#: cinder/compute/api.py:1000 +#, python-format +msgid "Instance %(instance_uuid)s is not stopped. (%(vm_state)s" +msgstr "" + +#: cinder/compute/api.py:1071 cinder/volume/api.py:173 +#: cinder/volume/volume_types.py:64 +#, python-format +msgid "Searching by: %s" +msgstr "" + +#: cinder/compute/api.py:1201 +#, python-format +msgid "Image type not recognized %s" +msgstr "" + +#: cinder/compute/api.py:1369 +msgid "flavor_id is None. Assuming migration." 
+msgstr "" + +#: cinder/compute/api.py:1377 +#, python-format +msgid "" +"Old instance type %(current_instance_type_name)s, new instance type " +"%(new_instance_type_name)s" +msgstr "" + +#: cinder/compute/api.py:1644 +#, python-format +msgid "multiple fixedips exist, using the first: %s" +msgstr "" + +#: cinder/compute/instance_types.py:57 cinder/compute/instance_types.py:65 +msgid "create arguments must be positive integers" +msgstr "" + +#: cinder/compute/instance_types.py:76 cinder/volume/volume_types.py:41 +#, python-format +msgid "DB error: %s" +msgstr "" + +#: cinder/compute/instance_types.py:86 +#, python-format +msgid "Instance type %s not found for deletion" +msgstr "" + +#: cinder/compute/manager.py:138 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: cinder/compute/manager.py:140 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| " +"|%(instance_uuid)s|" +msgstr "" + +#: cinder/compute/manager.py:144 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: cinder/compute/manager.py:146 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: cinder/compute/manager.py:151 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: cinder/compute/manager.py:155 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: cinder/compute/manager.py:201 +#, python-format +msgid "Unable to load the virtualization driver: %s" +msgstr "" + +#: cinder/compute/manager.py:223 +#, python-format +msgid "" +"Instance %(instance_uuid)s has been destroyed from under us while trying " +"to set it to ERROR" +msgstr "" + +#: cinder/compute/manager.py:240 +#, python-format +msgid "Current state is %(drv_state)s, state in DB is %(db_state)s." +msgstr "" + +#: cinder/compute/manager.py:245 +msgid "Rebooting instance after cinder-compute restart." +msgstr "" + +#: cinder/compute/manager.py:255 +msgid "Hypervisor driver does not support firewall rules" +msgstr "" + +#: cinder/compute/manager.py:260 +msgid "Checking state" +msgstr "" + +#: cinder/compute/manager.py:329 +#, python-format +msgid "Setting up bdm %s" +msgstr "" + +#: cinder/compute/manager.py:400 +#, python-format +msgid "Instance %s already deleted from database. Attempting forceful vm deletion" +msgstr "" + +#: cinder/compute/manager.py:406 +#, python-format +msgid "Exception encountered while terminating the instance %s" +msgstr "" + +#: cinder/compute/manager.py:444 +#, python-format +msgid "Instance %s not found." +msgstr "" + +#: cinder/compute/manager.py:480 +msgid "Instance has already been created" +msgstr "" + +#: cinder/compute/manager.py:523 +#, python-format +msgid "" +"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, " +"allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:528 +#, python-format +msgid "" +"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed " +"size %(allowed_size_bytes)d" +msgstr "" + +#: cinder/compute/manager.py:538 +msgid "Starting instance..." 
+msgstr "" + +#: cinder/compute/manager.py:548 +msgid "Skipping network allocation for instance" +msgstr "" + +#: cinder/compute/manager.py:561 +msgid "Instance failed network setup" +msgstr "" + +#: cinder/compute/manager.py:565 +#, python-format +msgid "Instance network_info: |%s|" +msgstr "" + +#: cinder/compute/manager.py:578 +msgid "Instance failed block device setup" +msgstr "" + +#: cinder/compute/manager.py:594 +msgid "Instance failed to spawn" +msgstr "" + +#: cinder/compute/manager.py:615 +msgid "Deallocating network for instance" +msgstr "" + +#: cinder/compute/manager.py:672 +#, python-format +msgid "%(action_str)s instance" +msgstr "" + +#: cinder/compute/manager.py:699 +#, python-format +msgid "Ignoring DiskNotFound: %s" +msgstr "" + +#: cinder/compute/manager.py:708 +#, python-format +msgid "terminating bdm %s" +msgstr "" + +#: cinder/compute/manager.py:742 cinder/compute/manager.py:1328 +#: cinder/compute/manager.py:1416 cinder/compute/manager.py:2501 +#, python-format +msgid "%s. Setting instance vm_state to ERROR" +msgstr "" + +#: cinder/compute/manager.py:811 +#, python-format +msgid "" +"Cannot rebuild instance [%(instance_uuid)s], because the given image does" +" not exist." +msgstr "" + +#: cinder/compute/manager.py:816 +#, python-format +msgid "Cannot rebuild instance [%(instance_uuid)s]: %(exc)s" +msgstr "" + +#: cinder/compute/manager.py:823 +#, python-format +msgid "Rebuilding instance %s" +msgstr "" + +#: cinder/compute/manager.py:876 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: cinder/compute/manager.py:891 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:933 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: cinder/compute/manager.py:939 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_uuid)s (state: " +"%(state)s expected: %(running)s)" +msgstr "" + +#: cinder/compute/manager.py:995 +#, python-format +msgid "Found %(num_images)d images (rotation: %(rotation)d)" +msgstr "" + +#: cinder/compute/manager.py:1001 +#, python-format +msgid "Rotating out %d backups" +msgstr "" + +#: cinder/compute/manager.py:1005 +#, python-format +msgid "Deleting image %s" +msgstr "" + +#: cinder/compute/manager.py:1035 +#, python-format +msgid "Failed to set admin password. Instance %s is not running" +msgstr "" + +#: cinder/compute/manager.py:1041 +#, python-format +msgid "Instance %s: Root password set" +msgstr "" + +#: cinder/compute/manager.py:1050 +msgid "set_admin_password is not implemented by this driver." 
+msgstr "" + +#: cinder/compute/manager.py:1064 +msgid "Error setting admin password" +msgstr "" + +#: cinder/compute/manager.py:1079 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1084 +#, python-format +msgid "instance %(instance_uuid)s: injecting file to %(path)s" +msgstr "" + +#: cinder/compute/manager.py:1098 +#, python-format +msgid "" +"trying to update agent on a non-running instance: %(instance_uuid)s " +"(state: %(current_power_state)s expected: %(expected_state)s)" +msgstr "" + +#: cinder/compute/manager.py:1103 +#, python-format +msgid "instance %(instance_uuid)s: updating agent to %(url)s" +msgstr "" + +#: cinder/compute/manager.py:1116 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: cinder/compute/manager.py:1141 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: cinder/compute/manager.py:1270 +msgid "destination same as source!" +msgstr "" + +#: cinder/compute/manager.py:1287 +#, python-format +msgid "instance %s: migrating" +msgstr "" + +#: cinder/compute/manager.py:1471 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: cinder/compute/manager.py:1489 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: cinder/compute/manager.py:1525 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: cinder/compute/manager.py:1534 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: cinder/compute/manager.py:1556 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: cinder/compute/manager.py:1579 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: cinder/compute/manager.py:1588 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: cinder/compute/manager.py:1596 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: cinder/compute/manager.py:1606 +#, python-format +msgid "instance %s: reset network" +msgstr "" + +#: cinder/compute/manager.py:1614 +#, python-format +msgid "instance %s: inject network info" +msgstr "" + +#: cinder/compute/manager.py:1618 +#, python-format +msgid "network_info to inject: |%s|" +msgstr "" + +#: cinder/compute/manager.py:1655 +#, python-format +msgid "instance %s: getting vnc console" +msgstr "" + +#: cinder/compute/manager.py:1685 +#, python-format +msgid "Booting with volume %(volume_id)s at %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1703 +#, python-format +msgid "" +"instance %(instance_uuid)s: attaching volume %(volume_id)s to " +"%(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1705 +#, python-format +msgid "Attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" + +#: cinder/compute/manager.py:1714 +#, python-format +msgid "instance %(instance_uuid)s: attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1724 +#, python-format +msgid "Attach failed %(mountpoint)s, removing" +msgstr "" + +#: cinder/compute/manager.py:1752 +#, python-format +msgid "Detach volume %(volume_id)s from mountpoint %(mp)s" +msgstr "" + +#: cinder/compute/manager.py:1756 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: cinder/compute/manager.py:1822 +#, python-format +msgid "" +"Creating tmpfile %s to notify to other compute nodes that they should " +"mount the same storage." +msgstr "" + +#: cinder/compute/manager.py:1884 +msgid "Instance has no volume." 
+msgstr "" + +#: cinder/compute/manager.py:1916 +#, python-format +msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s." +msgstr "" + +#: cinder/compute/manager.py:1973 +#, python-format +msgid "Pre live migration failed at %(dest)s" +msgstr "" + +#: cinder/compute/manager.py:2000 +msgid "post_live_migration() is started.." +msgstr "" + +#: cinder/compute/manager.py:2030 +msgid "No floating_ip found" +msgstr "" + +#: cinder/compute/manager.py:2038 +msgid "No floating_ip found." +msgstr "" + +#: cinder/compute/manager.py:2040 +#, python-format +msgid "" +"Live migration: Unexpected error: cannot inherit floating ip.\n" +"%(e)s" +msgstr "" + +#: cinder/compute/manager.py:2073 +#, python-format +msgid "Migrating instance to %(dest)s finished successfully." +msgstr "" + +#: cinder/compute/manager.py:2075 +msgid "" +"You may see the error \"libvirt: QEMU error: Domain not found: no domain " +"with matching name.\" This error can be safely ignored." +msgstr "" + +#: cinder/compute/manager.py:2090 +msgid "Post operation of migraton started" +msgstr "" + +#: cinder/compute/manager.py:2226 +#, python-format +msgid "Updated the info_cache for instance %s" +msgstr "" + +#: cinder/compute/manager.py:2255 +msgid "Updating bandwidth usage cache" +msgstr "" + +#: cinder/compute/manager.py:2277 +msgid "Updating host status" +msgstr "" + +#: cinder/compute/manager.py:2305 +#, python-format +msgid "" +"Found %(num_db_instances)s in the database and %(num_vm_instances)s on " +"the hypervisor." +msgstr "" + +#: cinder/compute/manager.py:2331 +#, python-format +msgid "" +"During the sync_power process the instance %(uuid)s has moved from host " +"%(src)s to host %(dst)s" +msgstr "" + +#: cinder/compute/manager.py:2344 +#, python-format +msgid "" +"Instance %s is in the process of migrating to this host. Wait next " +"sync_power cycle before setting power state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2350 +msgid "" +"Instance found in database but not known by hypervisor. Setting power " +"state to NOSTATE" +msgstr "" + +#: cinder/compute/manager.py:2380 +msgid "FLAGS.reclaim_instance_interval <= 0, skipping..." +msgstr "" + +#: cinder/compute/manager.py:2392 +msgid "Reclaiming deleted instance" +msgstr "" + +#: cinder/compute/manager.py:2458 +#, python-format +msgid "" +"Detected instance with name label '%(name)s' which is marked as DELETED " +"but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2465 +#, python-format +msgid "" +"Destroying instance with name label '%(name)s' which is marked as DELETED" +" but still present on host." +msgstr "" + +#: cinder/compute/manager.py:2472 +#, python-format +msgid "Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action" +msgstr "" + +#: cinder/compute/manager.py:2542 +#, python-format +msgid "" +"Aggregate %(aggregate_id)s: unrecoverable state during operation on " +"%(host)s" +msgstr "" + +#: cinder/compute/utils.py:142 +msgid "v4 subnets are required for legacy nw_info" +msgstr "" + +#: cinder/console/manager.py:77 cinder/console/vmrc_manager.py:70 +msgid "Adding console" +msgstr "" + +#: cinder/console/manager.py:97 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:122 +#, python-format +msgid "Tried to remove non-existent console %(console_id)s." +msgstr "" + +#: cinder/console/vmrc_manager.py:125 +#, python-format +msgid "Removing console %(console_id)s." 
+msgstr "" + +#: cinder/console/xvp.py:98 +msgid "Rebuilding xvp conf" +msgstr "" + +#: cinder/console/xvp.py:116 +#, python-format +msgid "Re-wrote %s" +msgstr "" + +#: cinder/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: cinder/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: cinder/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: cinder/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: cinder/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: cinder/consoleauth/manager.py:63 +#, python-format +msgid "Deleting Expired Token: (%s)" +msgstr "" + +#: cinder/consoleauth/manager.py:75 +#, python-format +msgid "Received Token: %(token)s, %(token_dict)s)" +msgstr "" + +#: cinder/consoleauth/manager.py:79 +#, python-format +msgid "Checking Token: %(token)s, %(token_valid)s)" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:57 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:198 +#, python-format +msgid "Unrecognized read_deleted value '%s'" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:516 cinder/db/sqlalchemy/api.py:551 +#, python-format +msgid "No ComputeNode for %(host)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4019 cinder/db/sqlalchemy/api.py:4045 +#, python-format +msgid "No backend config with id %(sm_backend_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4103 +#, python-format +msgid "No sm_flavor called %(sm_flavor)s" +msgstr "" + +#: cinder/db/sqlalchemy/api.py:4147 +#, python-format +msgid "No sm_volume with id %(volume_id)s" +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:66 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: cinder/db/sqlalchemy/migration.py:78 +msgid "version should be an integer" +msgstr "" + +#: cinder/db/sqlalchemy/session.py:137 +#, python-format +msgid "SQL connection failed. %s attempts left." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:48 +msgid "interface column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:80 +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:54 +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:48 +#, python-format +msgid "Table |%s| not created!" 
+msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:87 +msgid "VIF column not added to fixed_ips table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py:97 +#, python-format +msgid "join list for moving mac_addresses |%s|" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:39 +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:60 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:61 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:99 +msgid "foreign key constraint couldn't be added" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py:58 +msgid "foreign key constraint couldn't be dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/045_add_network_priority.py:34 +msgid "priority column not added to networks table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py:41 +#: cinder/db/sqlalchemy/migrate_repo/versions/060_remove_network_fk_from_vif.py:42 +#: cinder/db/sqlalchemy/migrate_repo/versions/064_change_instance_id_to_uuid_in_instance_actions.py:56 +#: cinder/db/sqlalchemy/migrate_repo/versions/070_untie_cinder_network_models.py:68 +msgid "foreign key constraint couldn't be removed" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/049_add_instances_progress.py:34 +msgid "progress column not added to instances table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/055_convert_flavor_id_to_str.py:97 +#, python-format +msgid "" +"Could not cast flavorid to integer: %s. Set flavorid to an integer-like " +"string to downgrade." +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/062_add_instance_info_cache_table.py:69 +msgid "instance_info_caches tables not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/069_block_migration.py:41 +msgid "progress column not added to compute_nodes table" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/072_add_dns_table.py:76 +msgid "dns_domains table not dropped" +msgstr "" + +#: cinder/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:60 +msgid "quota_classes table not dropped" +msgstr "" + +#: cinder/image/glance.py:147 +msgid "Connection error contacting glance server, retrying" +msgstr "" + +#: cinder/image/glance.py:153 cinder/network/quantum/melange_connection.py:104 +msgid "Maximum attempts reached" +msgstr "" + +#: cinder/image/glance.py:278 +#, python-format +msgid "Creating image in Glance. 
Metadata passed in %s" +msgstr "" + +#: cinder/image/glance.py:281 +#, python-format +msgid "Metadata after formatting for Glance %s" +msgstr "" + +#: cinder/image/glance.py:289 +#, python-format +msgid "Metadata returned from Glance formatted for Base %s" +msgstr "" + +#: cinder/image/glance.py:331 cinder/image/glance.py:335 +msgid "Not the image owner" +msgstr "" + +#: cinder/image/glance.py:410 +#, python-format +msgid "%(timestamp)s does not follow any of the signatures: %(iso_formats)s" +msgstr "" + +#: cinder/image/s3.py:309 +#, python-format +msgid "Failed to download %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:328 +#, python-format +msgid "Failed to decrypt %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:340 +#, python-format +msgid "Failed to untar %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:353 +#, python-format +msgid "Failed to upload %(image_location)s to %(image_path)s" +msgstr "" + +#: cinder/image/s3.py:379 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: cinder/image/s3.py:387 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: cinder/image/s3.py:398 +#, python-format +msgid "Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: cinder/image/s3.py:410 +msgid "Unsafe filenames in image" +msgstr "" + +#: cinder/ipv6/account_identifier.py:38 cinder/ipv6/rfc2462.py:34 +#, python-format +msgid "Bad mac for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:40 cinder/ipv6/rfc2462.py:36 +#, python-format +msgid "Bad prefix for to_global_ipv6: %s" +msgstr "" + +#: cinder/ipv6/account_identifier.py:42 +#, python-format +msgid "Bad project_id for to_global_ipv6: %s" +msgstr "" + +#: cinder/network/ldapdns.py:321 +msgid "This driver only supports type 'a' entries." 
+msgstr "" + +#: cinder/network/linux_net.py:166 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: cinder/network/linux_net.py:192 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + +#: cinder/network/linux_net.py:215 +#, python-format +msgid "" +"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " +"%(top)r" +msgstr "" + +#: cinder/network/linux_net.py:335 +msgid "IPTablesManager.apply completed with success" +msgstr "" + +#: cinder/network/linux_net.py:694 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: cinder/network/linux_net.py:696 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: cinder/network/linux_net.py:756 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: cinder/network/linux_net.py:758 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#: cinder/network/linux_net.py:967 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: cinder/network/linux_net.py:999 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: cinder/network/linux_net.py:1142 +#, python-format +msgid "Starting bridge %s " +msgstr "" + +#: cinder/network/linux_net.py:1149 +#, python-format +msgid "Done starting bridge %s" +msgstr "" + +#: cinder/network/linux_net.py:1167 +#, python-format +msgid "Failed unplugging gateway interface '%s'" +msgstr "" + +#: cinder/network/linux_net.py:1170 +#, python-format +msgid "Unplugged gateway interface '%s'" +msgstr "" + +#: cinder/network/manager.py:291 +#, python-format +msgid "Fixed ip %(fixed_ip_id)s not found" +msgstr "" + +#: cinder/network/manager.py:300 cinder/network/manager.py:496 +#, python-format +msgid "Interface %(interface)s not found" +msgstr "" + +#: cinder/network/manager.py:315 +#, python-format +msgid "floating IP allocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:353 +#, python-format +msgid "floating IP deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:386 +#, python-format +msgid "Address |%(address)s| is not allocated" +msgstr "" + +#: cinder/network/manager.py:390 +#, python-format +msgid "Address |%(address)s| is not allocated to your project |%(project)s|" +msgstr "" + +#: cinder/network/manager.py:402 +#, python-format +msgid "Quota exceeded for %s, tried to allocate address" +msgstr "" + +#: cinder/network/manager.py:614 +#, python-format +msgid "" +"Database inconsistency: DNS domain |%s| is registered in the Cinder db but " +"not visible to either the floating or instance DNS driver. It will be " +"ignored." +msgstr "" + +#: cinder/network/manager.py:660 +#, python-format +msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|." +msgstr "" + +#: cinder/network/manager.py:670 +#, python-format +msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|." 
+msgstr "" + +#: cinder/network/manager.py:778 +#, python-format +msgid "Disassociated %s stale fixed ip(s)" +msgstr "" + +#: cinder/network/manager.py:782 +msgid "setting network host" +msgstr "" + +#: cinder/network/manager.py:896 +#, python-format +msgid "network allocations for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:901 +#, python-format +msgid "networks retrieved for instance |%(instance_id)s|: |%(networks)s|" +msgstr "" + +#: cinder/network/manager.py:930 +#, python-format +msgid "network deallocation for instance |%s|" +msgstr "" + +#: cinder/network/manager.py:1152 +#, python-format +msgid "" +"instance-dns-zone is |%(domain)s|, which is in availability zone " +"|%(zone)s|. Instance |%(instance)s| is in zone |%(zone2)s|. No DNS record" +" will be created." +msgstr "" + +#: cinder/network/manager.py:1227 +#, python-format +msgid "Unable to release %s because vif doesn't exist." +msgstr "" + +#: cinder/network/manager.py:1244 +#, python-format +msgid "Leased IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1248 +#, python-format +msgid "IP %s leased that is not associated" +msgstr "" + +#: cinder/network/manager.py:1256 +#, python-format +msgid "IP |%s| leased that isn't allocated" +msgstr "" + +#: cinder/network/manager.py:1261 +#, python-format +msgid "Released IP |%(address)s|" +msgstr "" + +#: cinder/network/manager.py:1265 +#, python-format +msgid "IP %s released that is not associated" +msgstr "" + +#: cinder/network/manager.py:1268 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: cinder/network/manager.py:1331 +msgid "cidr already in use" +msgstr "" + +#: cinder/network/manager.py:1334 +#, python-format +msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)" +msgstr "" + +#: cinder/network/manager.py:1345 +#, python-format +msgid "" +"requested cidr (%(cidr)s) conflicts with existing smaller cidr " +"(%(smaller)s)" +msgstr "" + +#: cinder/network/manager.py:1404 +msgid "Network already exists!" +msgstr "" + +#: cinder/network/manager.py:1423 +#, python-format +msgid "Network must be disassociated from project %s before delete" +msgstr "" + +#: cinder/network/manager.py:1832 +msgid "" +"The sum between the number of networks and the vlan start cannot be " +"greater than 4094" +msgstr "" + +#: cinder/network/manager.py:1839 +#, python-format +msgid "" +"The network range is not big enough to fit %(num_networks)s. Network size" +" is %(network_size)s" +msgstr "" + +#: cinder/network/minidns.py:65 +msgid "This driver only supports type 'a'" +msgstr "" + +#: cinder/network/quantum/client.py:154 +msgid "Tenant ID not set" +msgstr "" + +#: cinder/network/quantum/client.py:180 +#, python-format +msgid "Quantum Client Request: %(method)s %(action)s" +msgstr "" + +#: cinder/network/quantum/client.py:196 +#, python-format +msgid "Quantum entity not found: %s" +msgstr "" + +#: cinder/network/quantum/client.py:206 +#, python-format +msgid "Server %(status_code)s error: %(data)s" +msgstr "" + +#: cinder/network/quantum/client.py:210 +#, python-format +msgid "Unable to connect to server. Got error: %s" +msgstr "" + +#: cinder/network/quantum/client.py:228 +#, python-format +msgid "unable to deserialize object of type = '%s'" +msgstr "" + +#: cinder/network/quantum/manager.py:162 +msgid "QuantumManager does not use 'multi_host' parameter." 
+msgstr "" + +#: cinder/network/quantum/manager.py:166 +msgid "QuantumManager requires that only one network is created per call" +msgstr "" + +#: cinder/network/quantum/manager.py:176 +msgid "QuantumManager does not use 'vlan_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:182 +msgid "QuantumManager does not use 'vpn_start' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:186 +msgid "QuantumManager does not use 'bridge' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:190 +msgid "QuantumManager does not use 'bridge_interface' parameter." +msgstr "" + +#: cinder/network/quantum/manager.py:195 +msgid "QuantumManager requires a valid (.1) gateway address." +msgstr "" + +#: cinder/network/quantum/manager.py:204 +#, python-format +msgid "" +"Unable to find existing quantum network for tenant '%(q_tenant_id)s' with" +" net-id '%(quantum_net_id)s'" +msgstr "" + +#: cinder/network/quantum/manager.py:301 +#, python-format +msgid "network allocations for instance %s" +msgstr "" + +#: cinder/network/quantum/manager.py:588 +#, python-format +msgid "" +"port deallocation failed for instance: |%(instance_id)s|, port_id: " +"|%(port_id)s|" +msgstr "" + +#: cinder/network/quantum/manager.py:606 +#, python-format +msgid "" +"ipam deallocation failed for instance: |%(instance_id)s|, vif_uuid: " +"|%(vif_uuid)s|" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:96 +#, python-format +msgid "Server returned error: %s" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:98 +msgid "Connection error contacting melange service, retrying" +msgstr "" + +#: cinder/network/quantum/melange_connection.py:108 +#, python-format +msgid "" +"allocate IP on network |%(network_id)s| belonging to " +"|%(network_tenant_id)s| to this vif |%(vif_id)s| with mac " +"|%(mac_address)s| belonging to |%(project_id)s| " +msgstr "" + +#: cinder/network/quantum/melange_ipam_lib.py:133 +msgid "get_project_and_global_net_ids must be called with a non-null project_id" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:75 +msgid "Error creating network entry" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:90 +#, python-format +msgid "No network with net_id = %s" +msgstr "" + +#: cinder/network/quantum/cinder_ipam_lib.py:221 +#, python-format +msgid "No fixed IPs to deallocate for vif %s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:99 +#, python-format +msgid "Connecting interface %(interface_id)s to net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/network/quantum/quantum_connection.py:113 +#, python-format +msgid "Deleting port %(port_id)s on net %(net_id)s for %(tenant_id)s" +msgstr "" + +#: cinder/notifier/api.py:115 +#, python-format +msgid "%s not in valid priorities" +msgstr "" + +#: cinder/notifier/api.py:130 +#, python-format +msgid "" +"Problem '%(e)s' attempting to send to notification system. " +"Payload=%(payload)s" +msgstr "" + +#: cinder/notifier/list_notifier.py:65 +#, python-format +msgid "Problem '%(e)s' attempting to send to notification driver %(driver)s." +msgstr "" + +#: cinder/notifier/rabbit_notifier.py:46 +#, python-format +msgid "Could not send notification to %(topic)s. 
Payload=%(message)s" +msgstr "" + +#: cinder/rpc/amqp.py:146 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: cinder/rpc/amqp.py:188 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: cinder/rpc/amqp.py:231 +#, python-format +msgid "received %s" +msgstr "" + +#: cinder/rpc/amqp.py:236 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:237 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: cinder/rpc/amqp.py:321 +#, python-format +msgid "Making asynchronous call on %s ..." +msgstr "" + +#: cinder/rpc/amqp.py:324 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: cinder/rpc/amqp.py:346 +#, python-format +msgid "Making asynchronous cast on %s..." +msgstr "" + +#: cinder/rpc/amqp.py:354 +msgid "Making asynchronous fanout cast..." +msgstr "" + +#: cinder/rpc/amqp.py:379 +#, python-format +msgid "Sending notification on %s..." +msgstr "" + +#: cinder/rpc/common.py:54 +#, python-format +msgid "" +"Remote error: %(exc_type)s %(value)s\n" +"%(traceback)s." +msgstr "" + +#: cinder/rpc/common.py:71 +msgid "Timeout while waiting on RPC response." +msgstr "" + +#: cinder/rpc/impl_kombu.py:111 +msgid "Failed to process message... skipping it." +msgstr "" + +#: cinder/rpc/impl_kombu.py:407 +#, python-format +msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:430 +#, python-format +msgid "Connected to AMQP server on %(hostname)s:%(port)d" +msgstr "" + +#: cinder/rpc/impl_kombu.py:466 +#, python-format +msgid "" +"Unable to connect to AMQP server on %(hostname)s:%(port)d after " +"%(max_retries)d tries: %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:482 +#, python-format +msgid "" +"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying " +"again in %(sleep_time)d seconds." +msgstr "" + +#: cinder/rpc/impl_kombu.py:533 cinder/rpc/impl_qpid.py:385 +#, python-format +msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:551 cinder/rpc/impl_qpid.py:400 +#, python-format +msgid "Timed out waiting for RPC response: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:555 cinder/rpc/impl_qpid.py:404 +#, python-format +msgid "Failed to consume message from queue: %s" +msgstr "" + +#: cinder/rpc/impl_kombu.py:589 cinder/rpc/impl_qpid.py:434 +#, python-format +msgid "Failed to publish message to topic '%(topic)s': %(err_str)s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:341 +#, python-format +msgid "Unable to connect to AMQP server: %s " +msgstr "" + +#: cinder/rpc/impl_qpid.py:346 +#, python-format +msgid "Connected to AMQP server on %s" +msgstr "" + +#: cinder/rpc/impl_qpid.py:354 +msgid "Re-established AMQP queues" +msgstr "" + +#: cinder/rpc/impl_qpid.py:412 +msgid "Error processing message. Skipping it." +msgstr "" + +#: cinder/scheduler/chance.py:47 cinder/scheduler/simple.py:91 +#: cinder/scheduler/simple.py:143 +msgid "Is the appropriate service running?" 
+msgstr "" + +#: cinder/scheduler/chance.py:52 +msgid "Could not find another compute" +msgstr "" + +#: cinder/scheduler/driver.py:63 +#, python-format +msgid "Casted '%(method)s' to volume '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:80 +#, python-format +msgid "Casted '%(method)s' to compute '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:89 +#, python-format +msgid "Casted '%(method)s' to network '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:107 +#, python-format +msgid "Casted '%(method)s' to %(topic)s '%(host)s'" +msgstr "" + +#: cinder/scheduler/driver.py:181 +msgid "Must implement a fallback schedule" +msgstr "" + +#: cinder/scheduler/driver.py:185 +msgid "Driver must implement schedule_prep_resize" +msgstr "" + +#: cinder/scheduler/driver.py:190 +msgid "Driver must implement schedule_run_instance" +msgstr "" + +#: cinder/scheduler/driver.py:325 +msgid "Block migration can not be used with shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:330 +msgid "Live migration can not be used without shared storage." +msgstr "" + +#: cinder/scheduler/driver.py:367 cinder/scheduler/driver.py:457 +#, python-format +msgid "host %(dest)s is not compatible with original host %(src)s." +msgstr "" + +#: cinder/scheduler/driver.py:416 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"memory(host:%(avail)s <= instance:%(mem_inst)s)" +msgstr "" + +#: cinder/scheduler/driver.py:472 +#, python-format +msgid "" +"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of " +"disk(host:%(available)s <= instance:%(necessary)s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:51 +#, python-format +msgid "No host selection for %s defined." +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:64 +#, python-format +msgid "Attempting to build %(num_instances)d instance(s)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:157 +msgid "Scheduler only understands Compute nodes (for now)" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:199 +#, python-format +msgid "Filtered %(hosts)s" +msgstr "" + +#: cinder/scheduler/filter_scheduler.py:209 +#, python-format +msgid "Weighted %(weighted_host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:144 +#, python-format +msgid "Host filter fails for ignored host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:151 +#, python-format +msgid "Host filter fails for non-forced host %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:157 +#, python-format +msgid "Host filter function %(func)s failed for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:163 +#, python-format +msgid "Host filter passes for %(host)s" +msgstr "" + +#: cinder/scheduler/host_manager.py:272 +#, python-format +msgid "Received %(service_name)s service update from %(host)s." +msgstr "" + +#: cinder/scheduler/host_manager.py:313 +msgid "host_manager only implemented for 'compute'" +msgstr "" + +#: cinder/scheduler/host_manager.py:323 +#, python-format +msgid "No service for compute ID %s" +msgstr "" + +#: cinder/scheduler/manager.py:85 +#, python-format +msgid "" +"Driver Method %(driver_method_name)s missing: %(e)s. Reverting to " +"schedule()" +msgstr "" + +#: cinder/scheduler/manager.py:150 +#, python-format +msgid "Failed to schedule_%(method)s: %(ex)s" +msgstr "" + +#: cinder/scheduler/manager.py:159 +#, python-format +msgid "Setting instance %(instance_uuid)s to %(state)s state." 
+msgstr "" + +#: cinder/scheduler/scheduler_options.py:66 +#, python-format +msgid "Could not stat scheduler options file %(filename)s: '%(e)s'" +msgstr "" + +#: cinder/scheduler/scheduler_options.py:75 +#, python-format +msgid "Could not decode scheduler options: '%(e)s'" +msgstr "" + +#: cinder/scheduler/simple.py:87 +msgid "Not enough allocatable CPU cores remaining" +msgstr "" + +#: cinder/scheduler/simple.py:137 +msgid "Not enough allocatable volume gigabytes remaining" +msgstr "" + +#: cinder/scheduler/filters/core_filter.py:45 +msgid "VCPUs not set; assuming CPU collection broken" +msgstr "" + +#: cinder/tests/fake_utils.py:72 +#, python-format +msgid "Faking execution of cmd (subprocess): %s" +msgstr "" + +#: cinder/tests/fake_utils.py:80 +#, python-format +msgid "Faked command matched %s" +msgstr "" + +#: cinder/tests/fake_utils.py:96 +#, python-format +msgid "Faked command raised an exception %s" +msgstr "" + +#: cinder/tests/fake_utils.py:101 +#, python-format +msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'" +msgstr "" + +#: cinder/tests/fakelibvirt.py:784 +msgid "Please extend mock libvirt module to support flags" +msgstr "" + +#: cinder/tests/fakelibvirt.py:790 +msgid "Please extend fake libvirt module to support this auth method" +msgstr "" + +#: cinder/tests/test_compute.py:365 cinder/tests/test_compute.py:1419 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:371 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: cinder/tests/test_compute.py:589 +msgid "Internal error" +msgstr "" + +#: cinder/tests/test_compute.py:1430 +#, python-format +msgid "After force-killing instances: %s" +msgstr "" + +#: cinder/tests/test_misc.py:92 +#, python-format +msgid "" +"The following migrations are missing a downgrade:\n" +"\t%s" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:169 +msgid "id" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:170 +msgid "IPv4" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:171 +msgid "IPv6" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:172 +msgid "start address" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:173 +msgid "DNS1" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:174 +msgid "DNS2" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:175 +msgid "VlanID" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:176 +msgid "project" +msgstr "" + +#: cinder/tests/test_cinder_manage.py:177 +msgid "uuid" +msgstr "" + +#: cinder/tests/test_volume.py:216 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: cinder/tests/test_volume.py:468 +#, python-format +msgid "Cannot confirm exported volume id:%s." 
+msgstr "" + +#: cinder/tests/test_volume_types.py:58 +#, python-format +msgid "Given data: %s" +msgstr "" + +#: cinder/tests/test_volume_types.py:59 +#, python-format +msgid "Result data: %s" +msgstr "" + +#: cinder/tests/test_xenapi.py:626 +#, python-format +msgid "Creating files in %s to simulate guest agent" +msgstr "" + +#: cinder/tests/test_xenapi.py:637 +#, python-format +msgid "Removing simulated guest agent files in %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/test_servers.py:2144 +#, python-format +msgid "Quota exceeded: code=%(code)s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:57 +#, python-format +msgid "_create: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:66 +#, python-format +msgid "_delete: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:75 +#, python-format +msgid "_get: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:85 +#, python-format +msgid "_get_all: %s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:125 +#, python-format +msgid "test_snapshot_create: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:134 +#, python-format +msgid "test_snapshot_create: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:156 +#, python-format +msgid "test_snapshot_create_force: param=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:165 +#, python-format +msgid "test_snapshot_create_force: resp_dict=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:205 +#, python-format +msgid "test_snapshot_show: resp=%s" +msgstr "" + +#: cinder/tests/api/openstack/compute/contrib/test_snapshots.py:231 +#, python-format +msgid "test_snapshot_detail: resp_dict=%s" +msgstr "" + +#: cinder/tests/integrated/test_login.py:31 +#, python-format +msgid "flavor: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:38 +#, python-format +msgid "" +"%(message)s\n" +"Status Code: %(_status)s\n" +"Body: %(_body)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:47 +msgid "Authentication error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:55 +msgid "Authorization error" +msgstr "" + +#: cinder/tests/integrated/api/client.py:63 +msgid "Item not found" +msgstr "" + +#: cinder/tests/integrated/api/client.py:105 +#, python-format +msgid "Doing %(method)s on %(relative_url)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:107 +#, python-format +msgid "Body: %s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:125 +#, python-format +msgid "%(auth_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:151 +#, python-format +msgid "%(relative_uri)s => code %(http_status)s" +msgstr "" + +#: cinder/tests/integrated/api/client.py:161 +msgid "Unexpected status code" +msgstr "" + +#: cinder/tests/integrated/api/client.py:168 +#, python-format +msgid "Decoding JSON: %s" +msgstr "" + +#: cinder/tests/rpc/common.py:133 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: cinder/tests/rpc/common.py:142 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: cinder/tests/rpc/common.py:160 +msgid "RPC backend does not support timeouts" +msgstr "" + +#: cinder/tests/rpc/common.py:227 cinder/tests/rpc/common.py:233 +#, python-format +msgid "Received %s" +msgstr "" + +#: cinder/virt/connection.py:85 +msgid "Failed to 
open connection to the hypervisor"
+msgstr ""
+
+#: cinder/virt/fake.py:270 cinder/virt/xenapi_conn.py:396
+#: cinder/virt/baremetal/proxy.py:716 cinder/virt/libvirt/connection.py:2045
+#, python-format
+msgid "Compute_service record created for %s "
+msgstr ""
+
+#: cinder/virt/fake.py:273 cinder/virt/xenapi_conn.py:399
+#: cinder/virt/baremetal/proxy.py:720 cinder/virt/libvirt/connection.py:2048
+#, python-format
+msgid "Compute_service record updated for %s "
+msgstr ""
+
+#: cinder/virt/firewall.py:130
+#, python-format
+msgid "Attempted to unfilter instance %s which is not filtered"
+msgstr ""
+
+#: cinder/virt/firewall.py:137
+#, python-format
+msgid "Filters added to instance %s"
+msgstr ""
+
+#: cinder/virt/firewall.py:139
+msgid "Provider Firewall Rules refreshed"
+msgstr ""
+
+#: cinder/virt/firewall.py:291
+#, python-format
+msgid "Adding security group rule: %r"
+msgstr ""
+
+#: cinder/virt/firewall.py:403 cinder/virt/xenapi/firewall.py:87
+#, python-format
+msgid "Adding provider rule: %s"
+msgstr ""
+
+#: cinder/virt/images.py:86
+msgid "'qemu-img info' parsing failed."
+msgstr ""
+
+#: cinder/virt/images.py:92
+#, python-format
+msgid "fmt=%(fmt)s backed by: %(backing_file)s"
+msgstr ""
+
+#: cinder/virt/images.py:104
+#, python-format
+msgid "Converted to raw, but format is now %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:105
+msgid ""
+"Must specify vmwareapi_host_ip, vmwareapi_host_username and "
+"vmwareapi_host_password to use connection_type=vmwareapi"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:276
+#, python-format
+msgid "In vmwareapi:_create_session, got this exception: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:359
+#, python-format
+msgid "In vmwareapi:_call_method, got this exception: %s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:398
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: success"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:404
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
+msgstr ""
+
+#: cinder/virt/vmwareapi_conn.py:409
+#, python-format
+msgid "In vmwareapi:_poll_task, got this error %s"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:140
+msgid ""
+"Must specify xenapi_connection_url, xenapi_connection_username "
+"(optionally), and xenapi_connection_password to use "
+"connection_type=xenapi"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:329 cinder/virt/libvirt/connection.py:472
+msgid "Could not determine iscsi initiator name"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:460
+msgid "Host startup on XenServer is not supported."
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:489
+msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:527
+msgid "Host is a member of a pool, but DB says otherwise"
+msgstr ""
+
+#: cinder/virt/xenapi_conn.py:599 cinder/virt/xenapi_conn.py:612
+#, python-format
+msgid "Got exception: %s"
+msgstr ""
+
+#: cinder/virt/baremetal/dom.py:93
+msgid "No domains exist."
+msgstr "" + +#: cinder/virt/baremetal/dom.py:95 +#, python-format +msgid "============= initial domains =========== : %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:99 +msgid "Building domain: to be removed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:103 +msgid "Not running domain: remove" +msgstr "" + +#: cinder/virt/baremetal/dom.py:111 +msgid "domain running on an unknown node: discarded" +msgstr "" + +#: cinder/virt/baremetal/dom.py:127 +#, python-format +msgid "No such domain (%s)" +msgstr "" + +#: cinder/virt/baremetal/dom.py:134 +#, python-format +msgid "Failed power down Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:143 +msgid "deactivate -> activate fails" +msgstr "" + +#: cinder/virt/baremetal/dom.py:153 +msgid "destroy_domain: no such domain" +msgstr "" + +#: cinder/virt/baremetal/dom.py:154 +#, python-format +msgid "No such domain %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:161 +#, python-format +msgid "Domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:163 +#, python-format +msgid "Nodes: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:166 +#, python-format +msgid "After storing domains: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:169 +msgid "deactivation/removing domain failed" +msgstr "" + +#: cinder/virt/baremetal/dom.py:176 +msgid "===== Domain is being created =====" +msgstr "" + +#: cinder/virt/baremetal/dom.py:179 +msgid "Same domain name already exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:181 +msgid "create_domain: before get_idle_node" +msgstr "" + +#: cinder/virt/baremetal/dom.py:198 +#, python-format +msgid "Created new domain: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:213 +#, python-format +msgid "Failed to boot Bare-metal node %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:222 +msgid "No such domain exists" +msgstr "" + +#: cinder/virt/baremetal/dom.py:226 +#, python-format +msgid "change_domain_state: to new state %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:233 +#, python-format +msgid "Stored fake domains to the file: %s" +msgstr "" + +#: cinder/virt/baremetal/dom.py:244 +msgid "domain does not exist" +msgstr "" + +#: cinder/virt/baremetal/nodes.py:42 +#, python-format +msgid "Unknown baremetal driver %(d)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:148 +#, python-format +msgid "Error encountered when destroying instance '%(name)s': %(ex)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:162 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:189 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:192 +msgid "_wait_for_reboot failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:222 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:225 +msgid "_wait_for_rescue failed" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:242 +msgid "<============= spawn of baremetal =============>" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:255 +#, python-format +msgid "instance %s: is building" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:260 +msgid "Key is injected but instance is not running yet" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:265 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:268 +#, python-format +msgid "~~~~~~ current state = %s ~~~~~~" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:269 +#, python-format +msgid "instance %s spawned successfully" 
+msgstr "" + +#: cinder/virt/baremetal/proxy.py:272 +#, python-format +msgid "instance %s:not booted" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:274 +msgid "Bremetal assignment is overcommitted." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:354 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:473 +#, python-format +msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:484 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image " +"%(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:529 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:531 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:574 cinder/virt/libvirt/connection.py:1815 +msgid "" +"Cannot get the number of cpu, because this function is not implemented " +"for this platform. This error can be safely ignored for now." +msgstr "" + +#: cinder/virt/baremetal/proxy.py:714 +#, python-format +msgid "#### RLK: cpu_arch = %s " +msgstr "" + +#: cinder/virt/baremetal/proxy.py:746 +msgid "Updating!" +msgstr "" + +#: cinder/virt/baremetal/proxy.py:773 cinder/virt/libvirt/connection.py:2609 +#: cinder/virt/xenapi/host.py:129 +msgid "Updating host stats" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:185 +msgid "free_node...." +msgstr "" + +#: cinder/virt/baremetal/tilera.py:216 +#, python-format +msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:221 +msgid "status of node is set to 0" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:232 +msgid "rootfs is already removed" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:264 +msgid "Before ping to the bare-metal node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:275 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:279 +#, python-format +msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:292 +msgid "Noting to do for tilera nodes: vmlinux is in CF" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:316 +msgid "activate_node" +msgstr "" + +#: cinder/virt/baremetal/tilera.py:330 +msgid "Node is unknown error state." +msgstr "" + +#: cinder/virt/disk/api.py:165 +msgid "no capable image handler configured" +msgstr "" + +#: cinder/virt/disk/api.py:178 +#, python-format +msgid "unknown disk image handler: %s" +msgstr "" + +#: cinder/virt/disk/api.py:189 +msgid "image already mounted" +msgstr "" + +#: cinder/virt/disk/api.py:276 cinder/virt/disk/guestfs.py:64 +#: cinder/virt/disk/guestfs.py:78 cinder/virt/disk/mount.py:100 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: cinder/virt/disk/api.py:291 +#, python-format +msgid "Failed to remove container: %s" +msgstr "" + +#: cinder/virt/disk/api.py:441 +#, python-format +msgid "User %(username)s not found in password file." +msgstr "" + +#: cinder/virt/disk/api.py:457 +#, python-format +msgid "User %(username)s not found in shadow file." 
+msgstr "" + +#: cinder/virt/disk/guestfs.py:39 +#, python-format +msgid "unsupported partition: %s" +msgstr "" + +#: cinder/virt/disk/guestfs.py:77 +msgid "unknown guestmount error" +msgstr "" + +#: cinder/virt/disk/loop.py:30 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: cinder/virt/disk/mount.py:76 +msgid "no partitions found" +msgstr "" + +#: cinder/virt/disk/mount.py:77 +#, python-format +msgid "Failed to map partitions: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:58 +msgid "nbd unavailable: module not loaded" +msgstr "" + +#: cinder/virt/disk/nbd.py:63 +msgid "No free nbd devices" +msgstr "" + +#: cinder/virt/disk/nbd.py:81 +#, python-format +msgid "qemu-nbd error: %s" +msgstr "" + +#: cinder/virt/disk/nbd.py:93 +#, python-format +msgid "nbd device %s did not show up" +msgstr "" + +#: cinder/virt/libvirt/connection.py:265 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:286 +msgid "Connection to libvirt broke" +msgstr "" + +#: cinder/virt/libvirt/connection.py:388 +#, python-format +msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:400 +#, python-format +msgid "" +"Error from libvirt during saved instance removal. Code=%(errcode)s " +"Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:411 +#, python-format +msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:423 +msgid "Instance destroyed successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:435 +#, python-format +msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:461 +#, python-format +msgid "Deleting instance files %(target)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:554 +msgid "attaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:567 +msgid "detaching LXC block device" +msgstr "" + +#: cinder/virt/libvirt/connection.py:692 +msgid "Instance soft rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:696 +msgid "Failed to soft reboot instance." +msgstr "" + +#: cinder/virt/libvirt/connection.py:725 +msgid "Instance shutdown successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:761 cinder/virt/libvirt/connection.py:905 +msgid "During reboot, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:766 +msgid "Instance rebooted successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:867 cinder/virt/xenapi/vmops.py:1358 +#, python-format +msgid "" +"Found %(migration_count)d unconfirmed migrations older than " +"%(confirm_window)d seconds" +msgstr "" + +#: cinder/virt/libvirt/connection.py:871 +#, python-format +msgid "Automatically confirming migration %d" +msgstr "" + +#: cinder/virt/libvirt/connection.py:896 +msgid "Instance is running" +msgstr "" + +#: cinder/virt/libvirt/connection.py:910 +msgid "Instance spawned successfully." 
+msgstr "" + +#: cinder/virt/libvirt/connection.py:926 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: cinder/virt/libvirt/connection.py:978 +#, fuzzy +msgid "Guest does not have a console available" +msgstr "使用者並沒有管理者權力" + +#: cinder/virt/libvirt/connection.py:1020 +#, python-format +msgid "Path '%(path)s' supports direct I/O" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1024 +#, python-format +msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1028 cinder/virt/libvirt/connection.py:1032 +#, python-format +msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1153 +msgid "Creating image" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1339 +#, python-format +msgid "Injecting %(injection)s into image %(img_id)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1349 +#, python-format +msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1381 +#, python-format +msgid "block_device_list %s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1658 +msgid "Starting toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1662 +msgid "Finished toXML method" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1679 +#, python-format +msgid "" +"Error from libvirt while looking up %(instance_name)s: [Error Code " +"%(error_code)s] %(ex)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1920 +msgid "libvirt version is too old (does not support getVersion)" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1942 +#, python-format +msgid "'' must be 1, but %d\n" +msgstr "" + +#: cinder/virt/libvirt/connection.py:1969 +#, python-format +msgid "topology (%(topology)s) must have %(ks)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2067 +#, python-format +msgid "" +"Instance launched has CPU info:\n" +"%s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2079 +#, python-format +msgid "" +"CPU doesn't have compatibility.\n" +"\n" +"%(ret)s\n" +"\n" +"Refer to %(u)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2136 +#, python-format +msgid "Timeout migrating for %s. nwfilter not found." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2352 +#, python-format +msgid "skipping %(path)s since it looks like volume" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2407 +#, python-format +msgid "Getting disk size of %(i_name)s: %(e)s" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2458 +#, python-format +msgid "Instance %s: Starting migrate_disk_and_power_off" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2513 +msgid "During wait running, instance disappeared." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2518 +msgid "Instance running successfully." +msgstr "" + +#: cinder/virt/libvirt/connection.py:2525 +#, python-format +msgid "Instance %s: Starting finish_migration" +msgstr "" + +#: cinder/virt/libvirt/connection.py:2565 +#, python-format +msgid "Instance %s: Starting finish_revert_migration" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:42 +msgid "" +"Libvirt module could not be loaded. NWFilterFirewall will not work " +"correctly." 
+msgstr "" + +#: cinder/virt/libvirt/firewall.py:93 +msgid "Called setup_basic_filtering in nwfilter" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:101 +msgid "Ensuring static filters" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:171 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:217 +#, python-format +msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found." +msgstr "" + +#: cinder/virt/libvirt/firewall.py:233 +msgid "iptables firewall: Setup Basic Filtering" +msgstr "" + +#: cinder/virt/libvirt/firewall.py:252 +msgid "Attempted to unfilter instance which is not filtered" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:170 +#, python-format +msgid "%s is a valid instance name" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:173 +#, python-format +msgid "%s has a disk file" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:175 +#, python-format +msgid "Instance %(instance)s is backed by %(backing)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:186 +#, python-format +msgid "" +"Instance %(instance)s is using a backing file %(backing)s which does not " +"appear in the image service" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:237 +#, python-format +msgid "%(id)s (%(base_file)s): image verification failed" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:247 +#, python-format +msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:266 +#, python-format +msgid "Cannot remove %(base_file)s, it does not exist" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:278 +#, python-format +msgid "Base file too young to remove: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:281 +#, python-format +msgid "Removing base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:288 +#, python-format +msgid "Failed to remove %(base_file)s, error was %(error)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:299 +#, python-format +msgid "%(id)s (%(base_file)s): checking" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:318 +#, python-format +msgid "" +"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d " +"on other nodes" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:330 +#, python-format +msgid "" +"%(id)s (%(base_file)s): warning -- an absent base file is in use! 
" +"instances: %(instance_list)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:338 +#, python-format +msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:348 +#, python-format +msgid "%(id)s (%(base_file)s): image is not in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:354 +#, python-format +msgid "%(id)s (%(base_file)s): image is in use" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:377 +#, python-format +msgid "Skipping verification, no base directory at %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:381 +msgid "Verify base images" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:388 +#, python-format +msgid "Image id %(id)s yields fingerprint %(fingerprint)s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:406 +#, python-format +msgid "Unknown base file: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:411 +#, python-format +msgid "Active base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:414 +#, python-format +msgid "Corrupt base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:418 +#, python-format +msgid "Removable base files: %s" +msgstr "" + +#: cinder/virt/libvirt/imagecache.py:426 +msgid "Verification complete" +msgstr "" + +#: cinder/virt/libvirt/utils.py:264 +msgid "Unable to find an open port" +msgstr "" + +#: cinder/virt/libvirt/vif.py:90 +#, python-format +msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:99 +#, python-format +msgid "Ensuring bridge %s" +msgstr "" + +#: cinder/virt/libvirt/vif.py:165 cinder/virt/libvirt/vif.py:220 +#, python-format +msgid "Failed while unplugging vif of instance '%s'" +msgstr "" + +#: cinder/virt/libvirt/volume.py:163 +#, python-format +msgid "iSCSI device not found at %s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:166 +#, python-format +msgid "" +"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. 
" +"Try number: %(tries)s" +msgstr "" + +#: cinder/virt/libvirt/volume.py:178 +#, python-format +msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)" +msgstr "" + +#: cinder/virt/vmwareapi/error_util.py:93 +#, python-format +msgid "Error(s) %s occurred in the call to RetrieveProperties" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:44 cinder/virt/xenapi/fake.py:77 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:131 +#, python-format +msgid "Property %(attr)s not set for the managed object %(objName)s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:437 +msgid "There is no VM registered" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:439 cinder/virt/vmwareapi/fake.py:609 +#, python-format +msgid "Virtual Machine with ref %s is not there" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:502 +#, python-format +msgid "Logging out a session that is invalid or already logged out: %s" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:517 +msgid "Session is faulty" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:520 +msgid "Session Invalid" +msgstr "" + +#: cinder/virt/vmwareapi/fake.py:606 +msgid " No Virtual Machine has been registered yet" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:99 +#, python-format +msgid "Glance image %s is in killed state" +msgstr "" + +#: cinder/virt/vmwareapi/io_util.py:107 +#, python-format +msgid "Glance image %(image_id)s is in unknown state - %(state)s" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:128 +msgid "" +"ESX SOAP server returned an empty port group for the host system in its " +"response" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:155 +#, python-format +msgid "Creating Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/network_utils.py:169 +#, python-format +msgid "Created Port Group with name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/read_write_util.py:150 +#, python-format +msgid "Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:84 +msgid "Unable to import suds." 
+msgstr "" + +#: cinder/virt/vmwareapi/vim.py:90 +msgid "Must specify vmwareapi_wsdl_loc" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:145 +#, python-format +msgid "No such SOAP method '%s' provided by VI SDK" +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:150 +#, python-format +msgid "httplib error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:157 +#, python-format +msgid "Socket error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:162 +#, python-format +msgid "Type error in %s: " +msgstr "" + +#: cinder/virt/vmwareapi/vim.py:166 +#, python-format +msgid "Exception in %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:66 +msgid "Getting list of instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:82 +#, python-format +msgid "Got total of %s instances" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:126 +msgid "Couldn't get a local Datastore reference" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:196 +#, python-format +msgid "Creating VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:205 +#, python-format +msgid "Created VM with the name %s on the ESX host" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:234 +#, python-format +msgid "" +"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter " +"type %(adapter_type)s on the ESX host local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:251 +#, python-format +msgid "" +"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host " +"local store %(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:260 +#, python-format +msgid "" +"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:272 +#, python-format +msgid "" +"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:283 +#, python-format +msgid "" +"Downloading image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:298 +#, python-format +msgid "" +"Downloaded image file data %(image_ref)s to the ESX data store " +"%(data_store_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:315 +#, python-format +msgid "Reconfiguring VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:322 +#, python-format +msgid "Reconfigured VM instance %s to attach the image disk" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:329 +#, python-format +msgid "Powering on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:335 +#, python-format +msgid "Powered on the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:381 +#, python-format +msgid "Creating Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:391 +#, python-format +msgid "Created Snapshot of the VM instance %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:434 +#, python-format +msgid "Copying disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:447 +#, python-format +msgid "Copied disk data before snapshot of the VM instance %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:456 +#, python-format +msgid "Uploading image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:469 +#, python-format +msgid "Uploaded image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:479 +#, python-format +msgid "Deleting temporary vmdk file %s" 
+msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:488 +#, python-format +msgid "Deleted temporary vmdk file %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:520 +msgid "instance is not powered on" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:527 +#, python-format +msgid "Rebooting guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:530 +#, python-format +msgid "Rebooted guest OS of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:532 +#, python-format +msgid "Doing hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:536 +#, python-format +msgid "Did hard reboot of VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:548 +#, python-format +msgid "instance - %s not present" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:567 +#, python-format +msgid "Powering off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:572 +#, python-format +msgid "Powered off the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:576 +#, python-format +msgid "Unregistering the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:579 +#, python-format +msgid "Unregistered the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:581 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while un-registering the " +"VM: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:592 +#, python-format +msgid "Deleting contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:602 +#, python-format +msgid "Deleted contents of the VM %(name)s from datastore %(datastore_name)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:607 +#, python-format +msgid "" +"In vmwareapi:vmops:destroy, got this exception while deleting the VM " +"contents from the disk: %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:615 +msgid "pause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:619 +msgid "unpause not supported for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:633 +#, python-format +msgid "Suspending the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:637 +#, python-format +msgid "Suspended the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:640 +msgid "instance is powered off and can not be suspended." +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:643 +#, python-format +msgid "VM %s was already in suspended state. 
So returning without doing anything" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:656 +#, python-format +msgid "Resuming the VM %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:661 +#, python-format +msgid "Resumed the VM %s " +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:663 +msgid "instance is not in a suspended state" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:699 +msgid "get_diagnostics not implemented for vmwareapi" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:757 +#, python-format +msgid "" +"Reconfiguring VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:765 +#, python-format +msgid "" +"Reconfigured VM instance %(name)s to set the machine id with ip - " +"%(ip_addr)s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:802 +#, python-format +msgid "Creating directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmops.py:806 +#, python-format +msgid "Created directory with path %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:89 +#, python-format +msgid "Downloading image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:103 +#, python-format +msgid "Downloaded image %s from glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:108 +#, python-format +msgid "Uploading image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:129 +#, python-format +msgid "Uploaded image %s to the Glance image server" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:139 +#, python-format +msgid "Getting image size for the image %s" +msgstr "" + +#: cinder/virt/vmwareapi/vmware_images.py:143 +#, python-format +msgid "Got image size of %(size)s for the image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:553 cinder/virt/xenapi/fake.py:652 +#: cinder/virt/xenapi/fake.py:670 cinder/virt/xenapi/fake.py:732 +msgid "Raising NotImplemented" +msgstr "" + +#: cinder/virt/xenapi/fake.py:555 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:589 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:594 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: cinder/virt/xenapi/fake.py:654 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: cinder/virt/xenapi/host.py:67 +#, python-format +msgid "" +"Instance %(name)s running on %(host)s could not be found in the database:" +" assuming it is a worker VM and skipping migration to a new host" +msgstr "" + +#: cinder/virt/xenapi/host.py:137 +#, python-format +msgid "Unable to get SR for this host: %s" +msgstr "" + +#: cinder/virt/xenapi/host.py:169 +msgid "Unable to get updated status" +msgstr "" + +#: cinder/virt/xenapi/host.py:172 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s." 
+msgstr "" + +#: cinder/virt/xenapi/network_utils.py:37 +#, python-format +msgid "Found non-unique network for name_label %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:55 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/network_utils.py:58 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:111 +#, python-format +msgid "Unable to eject %(host)s from the pool; pool not empty" +msgstr "" + +#: cinder/virt/xenapi/pool.py:126 +#, python-format +msgid "Unable to eject %(host)s from the pool; No master found" +msgstr "" + +#: cinder/virt/xenapi/pool.py:143 +#, python-format +msgid "Pool-Join failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:146 +#, python-format +msgid "Unable to join %(host)s in the pool" +msgstr "" + +#: cinder/virt/xenapi/pool.py:162 +#, python-format +msgid "Pool-eject failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/pool.py:174 +#, fuzzy, python-format +msgid "Unable to set up pool: %(e)s." +msgstr "無法卸載 Volume %s" + +#: cinder/virt/xenapi/pool.py:185 +#, python-format +msgid "Pool-set_name_label failed: %(e)s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:103 +#, python-format +msgid "Found no PIF for device %s" +msgstr "" + +#: cinder/virt/xenapi/vif.py:122 +#, python-format +msgid "" +"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. " +"Expected %(vlan_num)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:218 +msgid "Created VM" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:245 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:262 +#, python-format +msgid "VBD %s already detached" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:265 +#, python-format +msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:270 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:275 +#, python-format +msgid "Reached maximum number of retries trying to unplug VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:305 +#, python-format +msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:308 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:319 +#, python-format +msgid "Unable to destroy VDI %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:336 +#, python-format +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)" +" on %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:345 +#, python-format +msgid "Copied VDI %(vdi_ref)s from VDI %(vdi_to_copy_ref)s on %(sr_ref)s." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:353 +#, python-format +msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:372 +#, python-format +msgid "No primary VDI found for %(vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:379 +#, python-format +msgid "Snapshotting with label '%(label)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:392 +#, python-format +msgid "Created snapshot %(template_vm_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:583 +#, python-format +msgid "Creating blank HD of size %(req_size)d gigs" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:634 +#, python-format +msgid "" +"Fast cloning is only supported on default local SR of type ext. SR on " +"this system was found to be of type %(sr_type)s. Ignoring the cow flag." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:724 +#, python-format +msgid "" +"download_vhd %(image)s attempt %(attempt_num)d/%(max_attempts)d from " +"%(glance_host)s:%(glance_port)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:734 +#, python-format +msgid "download_vhd failed: %r" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:750 +#, python-format +msgid "Asking xapi to fetch vhd image %(image)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:760 +#, python-format +msgid "" +"xapi 'download_vhd' returned VDI of type '%(vdi_type)s' with UUID " +"'%(vdi_uuid)s'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:789 +#, python-format +msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:805 +#, python-format +msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:809 +#, python-format +msgid "" +"Image size %(size_bytes)d exceeded instance_type allowed size " +"%(allowed_size_bytes)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:831 +#, python-format +msgid "Fetching image %(image)s, type %(image_type_str)" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:844 +#, python-format +msgid "Size for image %(image)s: %(virtual_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:853 +#, python-format +msgid "" +"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d " +"bytes" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:870 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:884 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:895 +msgid "Failed to fetch glance image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:934 +#, python-format +msgid "Detected %(image_type_str)s format for image %(image_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:955 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:973 +#, python-format +msgid "Unknown image format %(disk_image_type)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1016 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1059 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1061 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1088 +#, python-format +msgid "Unable to parse rrd of 
%(vm_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1108 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1136 +#, python-format +msgid "Flag sr_matching_filter '%s' does not respect formatting convention" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1154 +msgid "" +"XenAPI is unable to find a Storage Repository to install guest instances " +"on. Please check your configuration and/or configure the flag " +"'sr_matching_filter'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1167 +msgid "Cannot find SR of content-type ISO" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1175 +#, python-format +msgid "ISO: looking at SR %(sr_rec)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1177 +msgid "ISO: not iso content" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1180 +msgid "ISO: iso content_type, no 'i18n-key' key" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1183 +msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1187 +msgid "ISO: SR MATCHing our criteria" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1189 +msgid "ISO: ISO, looking to see if it is host local" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1192 +#, python-format +msgid "ISO: PBD %(pbd_ref)s disappeared" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1195 +#, python-format +msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1198 +msgid "ISO: SR with local PBD" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1220 +#, python-format +msgid "" +"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: " +"%(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1236 +#, python-format +msgid "Unable to obtain RRD XML updates with server details: %(server)s." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1290 +#, python-format +msgid "Invalid statistics data from Xenserver: %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1343 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1417 +#, python-format +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1427 +#, python-format +msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1462 +#, python-format +msgid "Timeout waiting for device %s to be created" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1473 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1476 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1478 +#, python-format +msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1481 +#, python-format +msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1490 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1498 +#, python-format +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1511 +#, python-format +msgid "Running pygrub against %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1518 +#, python-format +msgid "Found Xen kernel %s" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1520 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1533 +msgid "Partitions:" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1539 +#, python-format +msgid " %(num)s: %(ptype)s %(size)d sectors" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1565 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to " +"%(dev_path)s..." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1578 +#, python-format +msgid "Writing partition table %s done." +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1632 +#, python-format +msgid "" +"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " +"virtual_size=%(virtual_size)d block_size=%(block_size)d" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1664 +#, python-format +msgid "" +"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% " +"reduction in size" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1714 +msgid "" +"XenServer tools installed in this image are capable of network injection." +" Networking files will not bemanipulated" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1722 +msgid "" +"XenServer tools are present in this image but are not capable of network " +"injection" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1726 +msgid "XenServer tools are not installed in this image" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1742 +msgid "Manipulating interface files directly" +msgstr "" + +#: cinder/virt/xenapi/vm_utils.py:1751 +#, python-format +msgid "Failed to mount filesystem (expected for non-linux instances): %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:131 cinder/virt/xenapi/vmops.py:722 +#, python-format +msgid "Updating progress to %(progress)d" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:231 +#, python-format +msgid "Attempted to power on non-existent instance bad instance id %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:233 +msgid "Starting instance" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:303 +msgid "Removing kernel/ramdisk files from dom0" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:358 +msgid "Failed to spawn, rolling back" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:443 +msgid "Detected ISO image type, creating blank VM for install" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:462 +msgid "Auto configuring disk, attempting to resize partition..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:515 +#, python-format +msgid "Invalid value for injected_files: %r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:520 +#, python-format +msgid "Injecting file path: '%s'" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:527 +msgid "Setting admin password" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:531 +msgid "Resetting network" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:538 +msgid "Setting VCPU weight" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:544 +msgid "Starting VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:551 +#, python-format +msgid "" +"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is " +"%(version)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:554 +#, python-format +msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:561 +msgid "Waiting for instance state to become running" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:573 +msgid "Querying agent version" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:576 +#, python-format +msgid "Instance agent version: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:581 +#, python-format +msgid "Updating Agent to %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:616 +#, python-format +msgid "No opaque_ref could be determined for '%s'." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:670 +msgid "Finished snapshot and upload for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:677 +msgid "Starting snapshot for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:686 +#, fuzzy, python-format +msgid "Unable to Snapshot instance: %(exc)s" +msgstr "無法掛載Volume 到虛擬機器 %s" + +#: cinder/virt/xenapi/vmops.py:702 +msgid "Failed to transfer vhd to new host" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:770 +#, python-format +msgid "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:893 +#, python-format +msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:901 +msgid "Resize complete" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:928 +#, python-format +msgid "Failed to query agent version: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:949 +#, python-format +msgid "domid changed from %(domid)s to %(newdomid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:962 +#, python-format +msgid "Failed to update agent: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:983 +#, python-format +msgid "Failed to exchange keys: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:998 +#, python-format +msgid "Failed to update password: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1023 +#, python-format +msgid "Failed to inject file: %(resp)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1032 +msgid "VM already halted, skipping shutdown..." 
+msgstr "" + +#: cinder/virt/xenapi/vmops.py:1036 +msgid "Shutting down VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1054 +msgid "Unable to find VBD for VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1097 +msgid "Using RAW or VHD, skipping kernel and ramdisk deletion" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1104 +msgid "instance has a kernel or ramdisk but not both" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1111 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1121 +msgid "VM destroyed" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1147 +msgid "Destroying VM" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1169 +msgid "VM is not present, skipping destroy..." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1222 +#, python-format +msgid "Instance is already in Rescue Mode: %s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1296 +#, python-format +msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1300 +msgid "Automatically hard rebooting" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1363 +#, python-format +msgid "Setting migration %(migration_id)s to error: %(reason)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1374 +#, python-format +msgid "" +"Automatically confirming migration %(migration_id)s for instance " +"%(instance_uuid)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1379 +#, python-format +msgid "Instance %(instance_uuid)s not found" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1383 +msgid "In ERROR state" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1389 +#, python-format +msgid "In %(task_state)s task_state, not RESIZE_VERIFY" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1396 +#, python-format +msgid "Error auto-confirming resize: %(e)s. Will retry later." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1418 +msgid "Could not get bandwidth info." +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1469 +msgid "Injecting network info to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1483 +msgid "Creating vifs" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1492 +#, python-format +msgid "Creating VIF for network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1495 +#, python-format +msgid "Created VIF %(vif_ref)s, network %(network_ref)s" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1520 +msgid "Injecting hostname to xenstore" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1545 +#, python-format +msgid "" +"The agent call to %(method)s returned an invalid response: %(ret)r. " +"path=%(path)s; args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1566 +#, python-format +msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1570 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. " +"args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1575 +#, python-format +msgid "The call to %(method)s returned an error: %(e)s. 
args=%(args)r" +msgstr "" + +#: cinder/virt/xenapi/vmops.py:1661 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:52 +msgid "creating sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:55 cinder/virt/xenapi/volume_utils.py:83 +#, python-format +msgid "type is = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:58 cinder/virt/xenapi/volume_utils.py:86 +#, python-format +msgid "name = %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:71 +#, python-format +msgid "Created %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:76 cinder/virt/xenapi/volume_utils.py:174 +msgid "Unable to create Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:80 +msgid "introducing sr within volume_utils" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:103 cinder/virt/xenapi/volume_utils.py:170 +#: cinder/virt/xenapi/volumeops.py:156 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:106 +msgid "Creating pbd for SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:108 +msgid "Plugging SR" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:116 cinder/virt/xenapi/volumeops.py:160 +msgid "Unable to introduce Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:127 cinder/virt/xenapi/volumeops.py:50 +msgid "Unable to get SR using uuid" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:129 +#, python-format +msgid "Forgetting SR %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:137 +msgid "Unable to forget Storage Repository" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:157 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:186 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:204 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:210 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:234 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:242 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:264 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:274 +#, python-format +msgid "Error finding vdis in SR %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:281 +#, python-format +msgid "Unable to find vbd for vdi %s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:315 +#, python-format +msgid "Unable to obtain target information %(data)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volume_utils.py:341 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:64 +msgid "Could not find VDI ref" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Creating SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:73 +msgid "Could not create SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:76 +msgid "Could not retrieve SR record" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Introducing SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:85 +msgid "SR found in xapi database. 
No need to introduce" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:90 +msgid "Could not introduce SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:94 +#, python-format +msgid "Checking for SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:106 +#, python-format +msgid "SR %s not found in the xapi database" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:112 +msgid "Could not forget SR" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:178 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "無法替 instance實例 %(instance_name)s , 建立 VDI 在SR %(sr_ref)s" + +#: cinder/virt/xenapi/volumeops.py:189 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "無法替 instance實例 %(instance_name)s , 使用SR %(sr_ref)s" + +#: cinder/virt/xenapi/volumeops.py:197 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "無法掛載Volume 到虛擬機器 %s" + +#: cinder/virt/xenapi/volumeops.py:200 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "掛載點 %(mountpoint)s 掛載到虛擬機器 %(instance_name)s" + +#: cinder/virt/xenapi/volumeops.py:210 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "卸載_Volume: %(instance_name)s, %(mountpoint)s" + +#: cinder/virt/xenapi/volumeops.py:219 +#, python-format +msgid "Unable to locate volume %s" +msgstr "找不到Volume %s" + +#: cinder/virt/xenapi/volumeops.py:227 +#, python-format +msgid "Unable to detach volume %s" +msgstr "無法卸載 Volume %s" + +#: cinder/virt/xenapi/volumeops.py:232 +#, python-format +msgid "Unable to destroy vbd %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:239 +#, python-format +msgid "Error purging SR %s" +msgstr "" + +#: cinder/virt/xenapi/volumeops.py:241 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "掛載點 %(mountpoint)s 從虛擬機器 %(instance_name)s 卸載" + +#: cinder/vnc/xvp_proxy.py:98 cinder/vnc/xvp_proxy.py:103 +#, python-format +msgid "Error in handshake: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:119 +#, python-format +msgid "Invalid request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:139 +#, python-format +msgid "Request: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:142 +#, python-format +msgid "Request made with missing token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:153 +#, python-format +msgid "Request made with invalid token: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:160 +#, python-format +msgid "Unexpected error: %s" +msgstr "" + +#: cinder/vnc/xvp_proxy.py:180 +#, python-format +msgid "Starting cinder-xvpvncproxy node (version %s)" +msgstr "" + +#: cinder/volume/api.py:74 cinder/volume/api.py:220 +msgid "status must be available" +msgstr "" + +#: cinder/volume/api.py:85 +#, python-format +msgid "Quota exceeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: cinder/volume/api.py:137 +#, fuzzy +msgid "Volume status must be available or error" +msgstr "Volume 狀態需要可被使用" + +#: cinder/volume/api.py:142 +#, python-format +msgid "Volume still has %d dependent snapshots" +msgstr "" + +#: cinder/volume/api.py:223 +msgid "already attached" +msgstr "" + +#: cinder/volume/api.py:230 +msgid "already detached" +msgstr "" + +#: cinder/volume/api.py:292 +msgid "must be available" +msgstr "" + +#: cinder/volume/api.py:325 +#, fuzzy +msgid "Volume Snapshot status 
must be available or error" +msgstr "Volume 狀態需要可被使用" + +#: cinder/volume/driver.py:96 +#, python-format +msgid "Recovering from a failed execute. Try number %s" +msgstr "" + +#: cinder/volume/driver.py:106 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: cinder/volume/driver.py:270 +#, python-format +msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:318 +#, python-format +msgid "Skipping remove_export. No iscsi_target provisioned for volume: %d" +msgstr "" + +#: cinder/volume/driver.py:327 +#, python-format +msgid "" +"Skipping remove_export. No iscsi_target is presently exported for volume:" +" %d" +msgstr "" + +#: cinder/volume/driver.py:337 +msgid "ISCSI provider_location not stored, using discovery" +msgstr "" + +#: cinder/volume/driver.py:384 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" + +#: cinder/volume/driver.py:388 +#, python-format +msgid "ISCSI Discovery: Found %s" +msgstr "" + +#: cinder/volume/driver.py:466 +#, python-format +msgid "Cannot confirm exported volume id:%(volume_id)s." +msgstr "" + +#: cinder/volume/driver.py:493 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: cinder/volume/driver.py:505 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: cinder/volume/driver.py:579 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: cinder/volume/driver.py:581 +msgid "Sheepdog is not working" +msgstr "" + +#: cinder/volume/driver.py:680 cinder/volume/driver.py:685 +#, python-format +msgid "LoggingVolumeDriver: %s" +msgstr "" + +#: cinder/volume/manager.py:96 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: cinder/volume/manager.py:101 +#, python-format +msgid "volume %s: skipping export" +msgstr "" + +#: cinder/volume/manager.py:107 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: cinder/volume/manager.py:119 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" + +#: cinder/volume/manager.py:131 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: cinder/volume/manager.py:144 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:153 +msgid "Volume is still attached" +msgstr "" + +#: cinder/volume/manager.py:155 +msgid "Volume is not local to this node" +msgstr "" + +#: cinder/volume/manager.py:159 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: cinder/volume/manager.py:161 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:164 +#, python-format +msgid "volume %s: volume is busy" +msgstr "" + +#: cinder/volume/manager.py:176 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:183 +#, python-format +msgid "snapshot %s: creating" +msgstr "" + +#: cinder/volume/manager.py:187 +#, python-format +msgid "snapshot %(snap_name)s: creating" +msgstr "" + +#: cinder/volume/manager.py:202 +#, python-format +msgid "snapshot %s: created successfully" +msgstr "" + +#: cinder/volume/manager.py:211 +#, python-format +msgid "snapshot %s: deleting" +msgstr "" + +#: cinder/volume/manager.py:214 +#, python-format +msgid "snapshot %s: snapshot is busy" +msgstr "" + +#: cinder/volume/manager.py:226 +#, python-format +msgid "snapshot %s: deleted successfully" +msgstr "" + +#: cinder/volume/manager.py:310 +msgid "Checking volume capabilities" +msgstr "" + +#: cinder/volume/manager.py:314 +#, 
python-format +msgid "New capabilities found: %s" +msgstr "" + +#: cinder/volume/manager.py:325 +msgid "Clear capabilities" +msgstr "" + +#: cinder/volume/manager.py:329 +#, python-format +msgid "Notification {%s} received" +msgstr "" + +#: cinder/volume/netapp.py:79 +#, python-format +msgid "API %(name)s failed: %(reason)s" +msgstr "" + +#: cinder/volume/netapp.py:109 +#, python-format +msgid "%s is not set" +msgstr "" + +#: cinder/volume/netapp.py:128 +msgid "Connected to DFM server" +msgstr "" + +#: cinder/volume/netapp.py:159 +#, python-format +msgid "Job failed: %s" +msgstr "" + +#: cinder/volume/netapp.py:240 +msgid "Failed to provision dataset member" +msgstr "" + +#: cinder/volume/netapp.py:252 +msgid "No LUN was created by the provision job" +msgstr "" + +#: cinder/volume/netapp.py:261 cinder/volume/netapp.py:433 +#, fuzzy, python-format +msgid "Failed to find LUN ID for volume %s" +msgstr "找不到Volume %s" + +#: cinder/volume/netapp.py:280 +msgid "Failed to remove and delete dataset member" +msgstr "" + +#: cinder/volume/netapp.py:603 cinder/volume/netapp.py:657 +#, fuzzy, python-format +msgid "No LUN ID for volume %s" +msgstr "找不到Volume %s" + +#: cinder/volume/netapp.py:607 cinder/volume/netapp.py:661 +#, python-format +msgid "Failed to get LUN details for LUN ID %s" +msgstr "" + +#: cinder/volume/netapp.py:614 +#, python-format +msgid "Failed to get host details for host ID %s" +msgstr "" + +#: cinder/volume/netapp.py:620 +#, python-format +msgid "Failed to get target portal for filer: %s" +msgstr "" + +#: cinder/volume/netapp.py:625 +#, python-format +msgid "Failed to get target IQN for filer: %s" +msgstr "" + +#: cinder/volume/san.py:113 cinder/volume/san.py:151 +msgid "Specify san_password or san_private_key" +msgstr "" + +#: cinder/volume/san.py:156 +msgid "san_ip must be set" +msgstr "" + +#: cinder/volume/san.py:320 +#, python-format +msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s" +msgstr "" + +#: cinder/volume/san.py:452 +#, python-format +msgid "CLIQ command returned %s" +msgstr "" + +#: cinder/volume/san.py:458 +#, python-format +msgid "Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:466 +#, python-format +msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s" +msgstr "" + +#: cinder/volume/san.py:496 +#, python-format +msgid "" +"Unexpected number of virtual ips for cluster %(cluster_name)s. " +"Result=%(_xml)s" +msgstr "" + +#: cinder/volume/san.py:549 +#, python-format +msgid "Volume info: %(volume_name)s => %(volume_attributes)s" +msgstr "" + +#: cinder/volume/san.py:594 +msgid "local_path not supported" +msgstr "" + +#: cinder/volume/san.py:626 +#, python-format +msgid "Could not determine project for volume %s, can't export" +msgstr "" + +#: cinder/volume/san.py:696 +#, python-format +msgid "Payload for SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:713 +#, python-format +msgid "Call to json.loads() raised an exception: %s" +msgstr "" + +#: cinder/volume/san.py:718 +#, python-format +msgid "Results of SolidFire API call: %s" +msgstr "" + +#: cinder/volume/san.py:732 +#, python-format +msgid "Found solidfire account: %s" +msgstr "" + +#: cinder/volume/san.py:746 +#, python-format +msgid "solidfire account: %s does not exist, create it..." +msgstr "" + +#: cinder/volume/san.py:804 +msgid "Enter SolidFire create_volume..."
+msgstr "" + +#: cinder/volume/san.py:846 +msgid "Leaving SolidFire create_volume" +msgstr "" + +#: cinder/volume/san.py:861 +msgid "Enter SolidFire delete_volume..." +msgstr "" + +#: cinder/volume/san.py:880 +#, python-format +msgid "Deleting volumeID: %s " +msgstr "" + +#: cinder/volume/san.py:888 +msgid "Leaving SolidFire delete_volume" +msgstr "" + +#: cinder/volume/san.py:891 +msgid "Executing SolidFire ensure_export..." +msgstr "" + +#: cinder/volume/san.py:895 +msgid "Executing SolidFire create_export..." +msgstr "" + +#: cinder/volume/volume_types.py:49 cinder/volume/volume_types.py:108 +msgid "name cannot be None" +msgstr "" + +#: cinder/volume/volume_types.py:96 +msgid "id cannot be None" +msgstr "" + +#: cinder/volume/xensm.py:55 +#, python-format +msgid "SR name = %s" +msgstr "" + +#: cinder/volume/xensm.py:56 +#, python-format +msgid "Params: %s" +msgstr "" + +#: cinder/volume/xensm.py:60 +#, python-format +msgid "Failed to create sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:62 +msgid "Create failed" +msgstr "" + +#: cinder/volume/xensm.py:64 +#, python-format +msgid "SR UUID of new SR is: %s" +msgstr "" + +#: cinder/volume/xensm.py:71 +msgid "Failed to update db" +msgstr "" + +#: cinder/volume/xensm.py:80 +#, python-format +msgid "Failed to introduce sr %s...continuing" +msgstr "" + +#: cinder/volume/xensm.py:91 +#, python-format +msgid "Failed to reach backend %d" +msgstr "" + +#: cinder/volume/xensm.py:100 +msgid "XenSMDriver requires xenapi connection" +msgstr "" + +#: cinder/volume/xensm.py:110 +msgid "Failed to initiate session" +msgstr "" + +#: cinder/volume/xensm.py:142 +#, python-format +msgid "Volume will be created in backend - %d" +msgstr "" + +#: cinder/volume/xensm.py:154 +msgid "Failed to update volume in db" +msgstr "" + +#: cinder/volume/xensm.py:157 +msgid "Unable to create volume" +msgstr "" + +#: cinder/volume/xensm.py:171 +msgid "Failed to delete vdi" +msgstr "" + +#: cinder/volume/xensm.py:177 +msgid "Failed to delete volume in db" +msgstr "" + +#: cinder/volume/xensm.py:210 +msgid "Failed to find volume in db" +msgstr "" + +#: cinder/volume/xensm.py:221 +msgid "Failed to find backend in db" +msgstr "" + +#: cinder/volume/nexenta/__init__.py:27 +msgid "Nexenta SA returned the error" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:64 +#, python-format +msgid "Sending JSON data: %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:69 +#, python-format +msgid "Auto switching to HTTPS connection to %s" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:75 +msgid "No headers in server response" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:76 +msgid "Bad response from server" +msgstr "" + +#: cinder/volume/nexenta/jsonrpc.py:79 +#, python-format +msgid "Got response: %s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:96 +#, python-format +msgid "Volume %s does not exist in Nexenta SA" +msgstr "" + +#: cinder/volume/nexenta/volume.py:180 +msgid "" +"Call to local_path should not happen. Verify that use_local_volumes flag " +"is turned off." 
+msgstr "" + +#: cinder/volume/nexenta/volume.py:202 +#, python-format +msgid "Ignored target creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:210 +#, python-format +msgid "Ignored target group creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:219 +#, python-format +msgid "Ignored target group member addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:227 +#, python-format +msgid "Ignored LU creation error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:237 +#, python-format +msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export" +msgstr "" + +#: cinder/volume/nexenta/volume.py:273 +#, python-format +msgid "" +"Got error trying to destroy target group %(target_group)s, assuming it is" +" already gone: %(exc)s" +msgstr "" + +#: cinder/volume/nexenta/volume.py:280 +#, python-format +msgid "" +"Got error trying to delete target %(target)s, assuming it is already " +"gone: %(exc)s" +msgstr "" + +#~ msgid "Unable to locate account %(account_name) on Solidfire device" +#~ msgstr "" + +#~ msgid "Zone %(zone_id)s could not be found." +#~ msgstr "" + +#~ msgid "Cinder access parameters were not specified." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(id)d could not be found." +#~ msgstr "" + +#~ msgid "Virtual Storage Array %(name)s could not be found." +#~ msgstr "" + +#~ msgid "Detected more than one volume with name %(vol_name)" +#~ msgstr "" + +#~ msgid "Detected existing vlan with id %(vlan)" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab semaphore \"%(lock)s\" " +#~ "for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "" +#~ "Attempting to grab file lock " +#~ "\"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Got file lock \"%(lock)s\" for method \"%(method)s\"...lock" +#~ msgstr "" + +#~ msgid "Parent group id and group id cannot be same" +#~ msgstr "" + +#~ msgid "No body provided" +#~ msgstr "" + +#~ msgid "Create VSA %(display_name)s of type %(vc_type)s" +#~ msgstr "" + +#~ msgid "Delete VSA with id: %s" +#~ msgstr "" + +#~ msgid "Associate address %(ip)s to VSA %(id)s" +#~ msgstr "" + +#~ msgid "Disassociate address from VSA %(id)s" +#~ msgstr "" + +#~ msgid "%(obj)s with ID %(id)s not found" +#~ msgstr "" + +#~ msgid "" +#~ "%(obj)s with ID %(id)s belongs to " +#~ "VSA %(own_vsa_id)s and not to VSA " +#~ "%(vsa_id)s." +#~ msgstr "" + +#~ msgid "Index. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Detail. vsa_id=%(vsa_id)s" +#~ msgstr "" + +#~ msgid "Create. vsa_id=%(vsa_id)s, body=%(body)s" +#~ msgstr "" + +#~ msgid "Create volume of %(size)s GB from VSA ID %(vsa_id)s" +#~ msgstr "" + +#~ msgid "Update %(obj)s with id: %(id)s, changes: %(changes)s" +#~ msgstr "" + +#~ msgid "Delete. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Show. vsa_id=%(vsa_id)s, id=%(id)s" +#~ msgstr "" + +#~ msgid "Index instances for VSA %s" +#~ msgstr "" + +#~ msgid "Quota exceeded for %(pid)s, tried to run %(min_count)s instances" +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You cannot run" +#~ " any more instances of this type." +#~ msgstr "" + +#~ msgid "" +#~ "Instance quota exceeded. You can only" +#~ " run %s more instances of this " +#~ "type." 
+#~ msgstr "" + +#~ msgid "Going to try to soft delete %s" +#~ msgstr "" + +#~ msgid "No host for instance %s, deleting immediately" +#~ msgstr "" + +#~ msgid "Going to try to terminate %s" +#~ msgstr "" + +#~ msgid "Going to try to stop %s" +#~ msgstr "" + +#~ msgid "Going to try to start %s" +#~ msgstr "" + +#~ msgid "" +#~ "Going to force the deletion of the" +#~ " vm %(instance_uuid)s, even if it is" +#~ " deleted" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s did not exist " +#~ "in the DB, but I will shut " +#~ "it down anyway using a special " +#~ "context" +#~ msgstr "" + +#~ msgid "exception terminating the instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "trying to destroy already destroyed instance: %s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(name)s found in database but" +#~ " not known by hypervisor. Setting " +#~ "power state to NOSTATE" +#~ msgstr "" + +#~ msgid "" +#~ "Detected instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "" +#~ "Destroying instance with name label " +#~ "'%(name_label)s' which is marked as " +#~ "DELETED but still present on host." +#~ msgstr "" + +#~ msgid "SQL connection failed (%(connstring)s). %(attempts)d attempts left." +#~ msgstr "" + +#~ msgid "Can't downgrade without losing data" +#~ msgstr "" + +#~ msgid "Instance %(instance_id)s not found" +#~ msgstr "" + +#~ msgid "Network %s has active ports, cannot delete" +#~ msgstr "" + +#~ msgid "No fixed IPs to deallocate for vif %sid" +#~ msgstr "" + +#~ msgid "" +#~ "AMQP server on %(fl_host)s:%(fl_port)d is " +#~ "unreachable: %(e)s. Trying again in " +#~ "%(fl_intv)d seconds." +#~ msgstr "" + +#~ msgid "Unable to connect to AMQP server after %(tries)d tries. Shutting down." +#~ msgstr "" + +#~ msgid "Reconnected to queue" +#~ msgstr "" + +#~ msgid "Failed to fetch message from queue: %s" +#~ msgstr "" + +#~ msgid "Initing the Adapter Consumer for %s" +#~ msgstr "" + +#~ msgid "Created \"%(exchange)s\" fanout exchange with \"%(key)s\" routing key" +#~ msgstr "" + +#~ msgid "Exception while processing consumer" +#~ msgstr "" + +#~ msgid "Creating \"%(exchange)s\" fanout exchange" +#~ msgstr "" + +#~ msgid "response %s" +#~ msgstr "" + +#~ msgid "topic is %s" +#~ msgstr "" + +#~ msgid "message %s" +#~ msgstr "" + +#~ msgid "" +#~ "Cannot confirm tmpfile at %(ipath)s is" +#~ " on same shared storage between " +#~ "%(src)s and %(dest)s." +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of memory(host:%(avail)s <= " +#~ "instance:%(mem_inst)s)" +#~ msgstr "" + +#~ msgid "" +#~ "Unable to migrate %(instance_id)s to " +#~ "%(dest)s: Lack of disk(host:%(available)s <=" +#~ " instance:%(necessary)s)" +#~ msgstr "" + +#~ msgid "Driver Method %(driver_method)s missing: %(e)s.Reverting to schedule()" +#~ msgstr "" + +#~ msgid "Setting instance %(instance_uuid)s to ERROR state." +#~ msgstr "" + +#~ msgid "_filter_hosts: %(request_spec)s" +#~ msgstr "" + +#~ msgid "Filter hosts for drive type %s" +#~ msgstr "" + +#~ msgid "Host %s has no free capacity. 
Skip" +#~ msgstr "" + +#~ msgid "Filter hosts: %s" +#~ msgstr "" + +#~ msgid "Must implement host selection mechanism" +#~ msgstr "" + +#~ msgid "Maximum number of hosts selected (%d)" +#~ msgstr "" + +#~ msgid "Selected excessive host %(host)s" +#~ msgstr "" + +#~ msgid "Provision volume %(name)s of size %(size)s GB on host %(host)s" +#~ msgstr "" + +#~ msgid "volume_params %(volume_params)s" +#~ msgstr "" + +#~ msgid "%(i)d: Volume %(name)s" +#~ msgstr "" + +#~ msgid "Attempting to spawn %(num_volumes)d volume(s)" +#~ msgstr "" + +#~ msgid "Error creating volumes" +#~ msgstr "" + +#~ msgid "Non-VSA volume %d" +#~ msgstr "" + +#~ msgid "Spawning volume %(volume_id)s with drive type %(drive_type)s" +#~ msgstr "" + +#~ msgid "Error creating volume" +#~ msgstr "" + +#~ msgid "No capability selected for volume of size %(size)s" +#~ msgstr "" + +#~ msgid "Host %s:" +#~ msgstr "" + +#~ msgid "" +#~ "\tDrive %(qosgrp)-25s: total %(total)2s, " +#~ "used %(used)2s, free %(free)2s. Available " +#~ "capacity %(avail)-5s" +#~ msgstr "" + +#~ msgid "" +#~ "\t LeastUsedHost: Best host: %(best_host)s." +#~ " (used capacity %(min_used)s)" +#~ msgstr "" + +#~ msgid "" +#~ "\t MostAvailCap: Best host: %(best_host)s. " +#~ "(available %(max_avail)s %(type_str)s)" +#~ msgstr "" + +#~ msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +#~ msgstr "" + +#~ msgid "Publishing to route %s" +#~ msgstr "" + +#~ msgid "Declaring queue %s" +#~ msgstr "" + +#~ msgid "Declaring exchange %s" +#~ msgstr "" + +#~ msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +#~ msgstr "" + +#~ msgid "Getting from %(queue)s: %(message)s" +#~ msgstr "" + +#~ msgid "Test: Emulate wrong VSA name. Raise" +#~ msgstr "" + +#~ msgid "Test: Emulate DB error. Raise" +#~ msgstr "" + +#~ msgid "Test: user_data = %s" +#~ msgstr "" + +#~ msgid "_create: param=%s" +#~ msgstr "" + +#~ msgid "Host %s" +#~ msgstr "" + +#~ msgid "Test: provision vol %(name)s on host %(host)s" +#~ msgstr "" + +#~ msgid "\t vol=%(vol)s" +#~ msgstr "" + +#~ msgid "Test: VSA update request: vsa_id=%(vsa_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume create: %s" +#~ msgstr "" + +#~ msgid "Test: Volume get request: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Test: Volume update request: id=%(volume_id)s values=%(values)s" +#~ msgstr "" + +#~ msgid "Test: Volume get: id=%(volume_id)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: success %(result)s" +#~ msgstr "" + +#~ msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +#~ msgstr "" + +#~ msgid "Unable to get updated status: %s" +#~ msgstr "" + +#~ msgid "" +#~ "deactivate_node is called for " +#~ "node_id = %(id)s node_ip = %(ip)s" +#~ msgstr "" + +#~ msgid "virsh said: %r" +#~ msgstr "" + +#~ msgid "cool, it's a device" +#~ msgstr "" + +#~ msgid "Unable to read LXC console" +#~ msgstr "" + +#~ msgid "" +#~ "to xml...\n" +#~ ":%s " +#~ msgstr "" + +#~ msgid "During wait running, %s disappeared." +#~ msgstr "" + +#~ msgid "Instance %s running successfully." +#~ msgstr "" + +#~ msgid "The nwfilter(%(instance_secgroup_filter_name)s) is not found." 
+#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image verification failed" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): image " +#~ "verification skipped, no hash stored" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): checking" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in use:" +#~ " on this node %(local)d local, " +#~ "%(remote)d on other nodes" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): warning " +#~ "-- an absent base file is in " +#~ "use! instances: %(instance_list)s" +#~ msgstr "" + +#~ msgid "" +#~ "%(container_format)s-%(id)s (%(base_file)s): in: on" +#~ " other nodes (%(remote)d on other " +#~ "nodes)" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is not in use" +#~ msgstr "" + +#~ msgid "%(container_format)s-%(id)s (%(base_file)s): image is in use" +#~ msgstr "" + +#~ msgid "Created VM %s..." +#~ msgstr "" + +#~ msgid "Created VM %(instance_name)s as %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Creating a CDROM-specific VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +#~ msgstr "" + +#~ msgid "" +#~ "Created a CDROM-specific VBD %(vbd_ref)s" +#~ " for VM %(vm_ref)s, VDI %(vdi_ref)s." +#~ msgstr "" + +#~ msgid "No primary VDI found for%(vm_ref)s" +#~ msgstr "" + +#~ msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +#~ msgstr "" + +#~ msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +#~ msgstr "" + +#~ msgid "Fetching image %(image)s" +#~ msgstr "" + +#~ msgid "Image Type: %s" +#~ msgstr "" + +#~ msgid "ISO: Found sr possibly containing the ISO image" +#~ msgstr "" + +#~ msgid "Size for image %(image)s:%(virtual_size)d" +#~ msgstr "" + +#~ msgid "instance %s: Failed to fetch glance image" +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s ... " +#~ msgstr "" + +#~ msgid "Creating VBD for VDI %s done." +#~ msgstr "" + +#~ msgid "VBD.unplug successful first time." +#~ msgstr "" + +#~ msgid "VBD.unplug rejected: retrying..." +#~ msgstr "" + +#~ msgid "Not sleeping anymore!" +#~ msgstr "" + +#~ msgid "VBD.unplug successful eventually." +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +#~ msgstr "" + +#~ msgid "Ignoring XenAPI.Failure %s" +#~ msgstr "" + +#~ msgid "Starting instance %s" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn" +#~ msgstr "" + +#~ msgid "Instance %s failed to spawn - performing clean-up" +#~ msgstr "" + +#~ msgid "instance %s: Failed to spawn - Unable to create VM" +#~ msgstr "" + +#~ msgid "" +#~ "Auto configuring disk for instance " +#~ "%(instance_uuid)s, attempting to resize " +#~ "partition..." +#~ msgstr "" + +#~ msgid "Invalid value for injected_files: '%s'" +#~ msgstr "" + +#~ msgid "Starting VM %s..." +#~ msgstr "" + +#~ msgid "Spawning VM %(instance_uuid)s created %(vm_ref)s." 
+#~ msgstr "" + +#~ msgid "Instance %s: waiting for running" +#~ msgstr "" + +#~ msgid "Instance %s: running" +#~ msgstr "" + +#~ msgid "Resources to remove:%s" +#~ msgstr "" + +#~ msgid "Removing VDI %(vdi_ref)s(uuid:%(vdi_to_remove)s)" +#~ msgstr "" + +#~ msgid "Skipping VDI destroy for %s" +#~ msgstr "" + +#~ msgid "Finished snapshot and upload for VM %s" +#~ msgstr "" + +#~ msgid "Starting snapshot for VM %s" +#~ msgstr "" + +#~ msgid "Unable to Snapshot instance %(instance_uuid)s: %(exc)s" +#~ msgstr "" + +#~ msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d" +#~ msgstr "" + +#~ msgid "Resize instance %s complete" +#~ msgstr "" + +#~ msgid "domid changed from %(olddomid)s to %(newdomid)s" +#~ msgstr "" + +#~ msgid "VM %(instance_uuid)s already halted,skipping shutdown..." +#~ msgstr "" + +#~ msgid "Shutting down VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Destroying VDIs for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "" +#~ "Instance %(instance_uuid)s using RAW or " +#~ "VHD, skipping kernel and ramdisk " +#~ "deletion" +#~ msgstr "" + +#~ msgid "Instance %(instance_uuid)s VM destroyed" +#~ msgstr "" + +#~ msgid "Destroying VM for Instance %(instance_uuid)s" +#~ msgstr "" + +#~ msgid "Automatically hard rebooting %d" +#~ msgstr "" + +#~ msgid "Instance for migration %d not found, skipping" +#~ msgstr "" + +#~ msgid "injecting network info to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "creating vif(s) for vm: |%s|" +#~ msgstr "" + +#~ msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +#~ msgstr "" + +#~ msgid "injecting hostname to xs for vm: |%s|" +#~ msgstr "" + +#~ msgid "" +#~ "The agent call to %(method)s returned" +#~ " an invalid response: %(ret)r. VM " +#~ "id=%(instance_uuid)s; path=%(path)s; args=%(addl_args)r" +#~ msgstr "" + +#~ msgid "" +#~ "TIMEOUT: The call to %(method)s timed" +#~ " out. VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "NOT IMPLEMENTED: The call to %(method)s" +#~ " is not supported by the agent. " +#~ "VM id=%(instance_uuid)s; args=%(args)r" +#~ msgstr "" + +#~ msgid "" +#~ "The call to %(method)s returned an " +#~ "error: %(e)s. VM id=%(instance_uuid)s; " +#~ "args=%(args)r" +#~ msgstr "" + +#~ msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" +#~ msgstr "" + +#~ msgid "Error destroying VDI" +#~ msgstr "" + +#~ msgid "\tVolume %s is NOT VSA volume" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s creation - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE create_volume for %s succeeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s deletion - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s failed" +#~ msgstr "" + +#~ msgid "VSA BE delete_volume for %s suceeded" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s local path call - call discover" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s ensure export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s create export - do nothing" +#~ msgstr "" + +#~ msgid "\tFE VSA Volume %s remove export - do nothing" +#~ msgstr "" + +#~ msgid "VSA BE remove_export for %s failed" +#~ msgstr "" + +#~ msgid "Failed to retrieve QoS info" +#~ msgstr "" + +#~ msgid "invalid drive data" +#~ msgstr "" + +#~ msgid "drive_name not defined" +#~ msgstr "" + +#~ msgid "invalid drive type name %s" +#~ msgstr "" + +#~ msgid "*** Experimental VSA code ***" +#~ msgstr "" + +#~ msgid "Requested number of VCs (%d) is too high. Setting to default" +#~ msgstr "" + +#~ msgid "Creating VSA: %s" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d %(vsa_name)s: Create " +#~ "volume %(vol_name)s, %(vol_size)d GB, type " +#~ "%(vol_type_id)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA status to %(status)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Update VSA call" +#~ msgstr "" + +#~ msgid "Adding %(add_cnt)s VCs to VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "Deleting %(del_cnt)s VCs from VSA %(vsa_name)s." +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Deleting %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Unable to delete volume %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Forced delete. %(direction)s volume %(vol_name)s" +#~ msgstr "" + +#~ msgid "Going to try to terminate VSA ID %s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Delete instance %(name)s" +#~ msgstr "" + +#~ msgid "Create call received for VSA %s" +#~ msgstr "" + +#~ msgid "Failed to find VSA %(vsa_id)d" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)s: Drive %(vol_id)s created. Status %(status)s" +#~ msgstr "" + +#~ msgid "Drive %(vol_name)s (%(vol_disp_name)s) still in creating phase - wait" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Not all volumes " +#~ "are created (%(cvol_real)d of %(cvol_exp)d)" +#~ msgstr "" + +#~ msgid "" +#~ "VSA ID %(vsa_id)d: Drive %(vol_name)s " +#~ "(%(vol_disp_name)s) is in %(status)s state" +#~ msgstr "" + +#~ msgid "Failed to update attach status for volume %(vol_name)s. %(ex)s" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Delete all BE volumes" +#~ msgstr "" + +#~ msgid "VSA ID %(vsa_id)d: Start %(vc_count)d instances" +#~ msgstr "" + diff --git a/cinder/log.py b/cinder/log.py new file mode 100644 index 00000000000..2e458659e63 --- /dev/null +++ b/cinder/log.py @@ -0,0 +1,416 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Cinder logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. Additionally, an instance uuid +may be passed as part of the log message, which is intended to make it easier +for admins to find messages related to a specific instance. + +It also allows setting of formatting information through flags. + +""" + +import cStringIO +import inspect +import itertools +import json +import logging +import logging.config +import logging.handlers +import os +import stat +import sys +import traceback + +import cinder +from cinder import flags +from cinder.openstack.common import cfg +from cinder.openstack.common import local +from cinder import version + + +log_opts = [ + cfg.StrOpt('logging_context_format_string', + default='%(asctime)s %(levelname)s %(name)s [%(request_id)s ' + '%(user_id)s %(project_id)s] %(instance)s' + '%(message)s', + help='format string to use for log messages with context'), + cfg.StrOpt('logging_default_format_string', + default='%(asctime)s %(levelname)s %(name)s [-] %(instance)s' + '%(message)s', + help='format string to use for log messages without context'), + cfg.StrOpt('logging_debug_format_suffix', + default='from (pid=%(process)d) %(funcName)s ' + '%(pathname)s:%(lineno)d', + help='data to append to log format when level is DEBUG'), + cfg.StrOpt('logging_exception_prefix', + default='%(asctime)s TRACE %(name)s %(instance)s', + help='prefix each line of exception output with this format'), + cfg.ListOpt('default_log_levels', + default=[ + 'amqplib=WARN', + 'sqlalchemy=WARN', + 'boto=WARN', + 'suds=INFO', + 'eventlet.wsgi.server=WARN' + ], + help='list of logger=LEVEL pairs'), + cfg.BoolOpt('publish_errors', + default=False, + help='publish error events'), + + # NOTE(mikal): there are two options here because sometimes we are handed + # a full instance (and could include more information), and other times we + # are just handed a UUID for the instance. + cfg.StrOpt('instance_format', + default='[instance: %(uuid)s] ', + help='If an instance is passed with the log message, format ' + 'it like this'), + cfg.StrOpt('instance_uuid_format', + default='[instance: %(uuid)s] ', + help='If an instance UUID is passed with the log message, ' + 'format it like this'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(log_opts) + +# our new audit level +# NOTE(jkoelker) Since we synthesized an audit level, make the logging +# module aware of it so it acts like other levels. 
+logging.AUDIT = logging.INFO + 1 +logging.addLevelName(logging.AUDIT, 'AUDIT') + + +try: + NullHandler = logging.NullHandler +except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 + class NullHandler(logging.Handler): + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def _get_binary_name(): + return os.path.basename(inspect.stack()[-1][1]) + + +def _get_log_file_path(binary=None): + logfile = FLAGS.log_file or FLAGS.logfile + logdir = FLAGS.log_dir or FLAGS.logdir + + if logfile and not logdir: + return logfile + + if logfile and logdir: + return os.path.join(logdir, logfile) + + if logdir: + binary = binary or _get_binary_name() + return '%s.log' % (os.path.join(logdir, binary),) + + +class CinderContextAdapter(logging.LoggerAdapter): + warn = logging.LoggerAdapter.warning + + def __init__(self, logger): + self.logger = logger + + def audit(self, msg, *args, **kwargs): + self.log(logging.AUDIT, msg, *args, **kwargs) + + def process(self, msg, kwargs): + if 'extra' not in kwargs: + kwargs['extra'] = {} + extra = kwargs['extra'] + + context = kwargs.pop('context', None) + if not context: + context = getattr(local.store, 'context', None) + if context: + extra.update(_dictify_context(context)) + + instance = kwargs.pop('instance', None) + instance_extra = '' + if instance: + instance_extra = FLAGS.instance_format % instance + else: + instance_uuid = kwargs.pop('instance_uuid', None) + if instance_uuid: + instance_extra = (FLAGS.instance_uuid_format + % {'uuid': instance_uuid}) + extra.update({'instance': instance_extra}) + + extra.update({"cinder_version": version.version_string_with_vcs()}) + extra['extra'] = extra.copy() + return msg, kwargs + + +class JSONFormatter(logging.Formatter): + def __init__(self, fmt=None, datefmt=None): + # NOTE(jkoelker) we ignore the fmt argument, but its still there + # since logging.config.fileConfig passes it. + self.datefmt = datefmt + + def formatException(self, ei, strip_newlines=True): + lines = traceback.format_exception(*ei) + if strip_newlines: + lines = [itertools.ifilter(lambda x: x, + line.rstrip().splitlines()) + for line in lines] + lines = list(itertools.chain(*lines)) + return lines + + def format(self, record): + message = {'message': record.getMessage(), + 'asctime': self.formatTime(record, self.datefmt), + 'name': record.name, + 'msg': record.msg, + 'args': record.args, + 'levelname': record.levelname, + 'levelno': record.levelno, + 'pathname': record.pathname, + 'filename': record.filename, + 'module': record.module, + 'lineno': record.lineno, + 'funcname': record.funcName, + 'created': record.created, + 'msecs': record.msecs, + 'relative_created': record.relativeCreated, + 'thread': record.thread, + 'thread_name': record.threadName, + 'process_name': record.processName, + 'process': record.process, + 'traceback': None} + + if hasattr(record, 'extra'): + message['extra'] = record.extra + + if record.exc_info: + message['traceback'] = self.formatException(record.exc_info) + + return json.dumps(message) + + +class LegacyCinderFormatter(logging.Formatter): + """A cinder.context.RequestContext aware formatter configured via flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. 
You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug. + + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + + """ + + def format(self, record): + """Uses contextstring if request_id is set, otherwise default.""" + if 'instance' not in record.__dict__: + record.__dict__['instance'] = '' + + if record.__dict__.get('request_id', None): + self._fmt = FLAGS.logging_context_format_string + else: + self._fmt = FLAGS.logging_default_format_string + + if (record.levelno == logging.DEBUG and + FLAGS.logging_debug_format_suffix): + self._fmt += " " + FLAGS.logging_debug_format_suffix + + # Cache this on the record, Logger will respect our formated copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with FLAGS.logging_exception_prefix.""" + if not record: + return logging.Formatter.formatException(self, exc_info) + + stringbuffer = cStringIO.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split('\n') + stringbuffer.close() + + if FLAGS.logging_exception_prefix.find('%(asctime)') != -1: + record.asctime = self.formatTime(record, self.datefmt) + + formatted_lines = [] + for line in lines: + pl = FLAGS.logging_exception_prefix % record.__dict__ + fl = '%s%s' % (pl, line) + formatted_lines.append(fl) + return '\n'.join(formatted_lines) + + +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + if 'list_notifier_drivers' in FLAGS: + if 'cinder.notifier.log_notifier' in FLAGS.list_notifier_drivers: + return + cinder.notifier.api.notify('cinder.error.publisher', + 'error_notification', + cinder.notifier.api.ERROR, + dict(error=record.msg)) + + +def handle_exception(type, value, tb): + extra = {} + if FLAGS.verbose: + extra['exc_info'] = (type, value, tb) + getLogger().critical(str(value), **extra) + + +def setup(): + """Setup cinder logging.""" + sys.excepthook = handle_exception + + if FLAGS.log_config: + try: + logging.config.fileConfig(FLAGS.log_config) + except Exception: + traceback.print_exc() + raise + else: + _setup_logging_from_flags() + + +def _find_facility_from_flags(): + facility_names = logging.handlers.SysLogHandler.facility_names + facility = getattr(logging.handlers.SysLogHandler, + FLAGS.syslog_log_facility, + None) + + if facility is None and FLAGS.syslog_log_facility in facility_names: + facility = facility_names.get(FLAGS.syslog_log_facility) + + if facility is None: + valid_facilities = facility_names.keys() + consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', + 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', + 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', + 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', + 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] + valid_facilities.extend(consts) + raise TypeError(_('syslog facility must be one of: %s') % + ', '.join("'%s'" % fac + for fac in valid_facilities)) + + return facility + + +def _setup_logging_from_flags(): + cinder_root = getLogger().logger + for handler in cinder_root.handlers: + cinder_root.removeHandler(handler) + + if FLAGS.use_syslog: + facility = _find_facility_from_flags() + syslog = logging.handlers.SysLogHandler(address='/dev/log', + facility=facility) + 
cinder_root.addHandler(syslog)
+
+    logpath = _get_log_file_path()
+    if logpath:
+        filelog = logging.handlers.WatchedFileHandler(logpath)
+        cinder_root.addHandler(filelog)
+
+        mode = int(FLAGS.logfile_mode, 8)
+        st = os.stat(logpath)
+        if st.st_mode != (stat.S_IFREG | mode):
+            os.chmod(logpath, mode)
+
+    if FLAGS.use_stderr:
+        streamlog = logging.StreamHandler()
+        cinder_root.addHandler(streamlog)
+
+    elif not FLAGS.log_file:
+        streamlog = logging.StreamHandler(stream=sys.stdout)
+        cinder_root.addHandler(streamlog)
+
+    if FLAGS.publish_errors:
+        cinder_root.addHandler(PublishErrorsHandler(logging.ERROR))
+
+    for handler in cinder_root.handlers:
+        datefmt = FLAGS.log_date_format
+        if FLAGS.log_format:
+            handler.setFormatter(logging.Formatter(fmt=FLAGS.log_format,
+                                                   datefmt=datefmt))
+        else:
+            handler.setFormatter(LegacyCinderFormatter(datefmt=datefmt))
+
+    if FLAGS.verbose or FLAGS.debug:
+        cinder_root.setLevel(logging.DEBUG)
+    else:
+        cinder_root.setLevel(logging.INFO)
+
+    level = logging.NOTSET
+    for pair in FLAGS.default_log_levels:
+        mod, _sep, level_name = pair.partition('=')
+        level = logging.getLevelName(level_name)
+        logger = logging.getLogger(mod)
+        logger.setLevel(level)
+
+    # NOTE(jkoelker) Clear the handlers for the root logger that was set up
+    #                by basicConfig in cinder/__init__.py and install the
+    #                NullHandler.
+    root = logging.getLogger()
+    for handler in root.handlers:
+        root.removeHandler(handler)
+    handler = NullHandler()
+    handler.setFormatter(logging.Formatter())
+    root.addHandler(handler)
+
+
+_loggers = {}
+
+
+def getLogger(name='cinder'):
+    if name not in _loggers:
+        _loggers[name] = CinderContextAdapter(logging.getLogger(name))
+    return _loggers[name]
+
+
+class WritableLogger(object):
+    """A thin wrapper that responds to `write` and logs."""
+
+    def __init__(self, logger, level=logging.INFO):
+        self.logger = logger
+        self.level = level
+
+    def write(self, msg):
+        self.logger.log(self.level, msg)
diff --git a/cinder/manager.py b/cinder/manager.py
new file mode 100644
index 00000000000..9609e3906ea
--- /dev/null
+++ b/cinder/manager.py
@@ -0,0 +1,205 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Base Manager class.
+
+Managers are responsible for a certain aspect of the system; each is a
+logical grouping of code relating to a portion of the system. In general,
+other components should use the manager to make changes to the components
+that it is responsible for.
+
+For example, other components that need to deal with volumes in some way
+should do so by calling methods on the VolumeManager instead of directly
+changing fields in the database. This allows us to keep all of the code
+relating to volumes in the same place.
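+
+For example (an illustrative sketch only; the real call signatures live in
+cinder.volume.manager and cinder.db.api):
+
+    # preferred: route the change through the manager
+    volume_manager.delete_volume(context, volume_id)
+
+    # avoid: changing volume state behind the manager's back
+    db.volume_destroy(context, volume_id)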
+ +We have adopted a basic strategy of Smart managers and dumb data, which means +rather than attaching methods to data objects, components should call manager +methods that act on the data. + +Methods on managers that can be executed locally should be called directly. If +a particular method must execute on a remote host, this should be done via rpc +to the service that wraps the manager + +Managers should be responsible for most of the db access, and +non-implementation specific data. Anything implementation specific that can't +be generalized should be done by the Driver. + +In general, we prefer to have one manager with multiple drivers for different +implementations, but sometimes it makes sense to have multiple managers. You +can think of it this way: Abstract different overall strategies at the manager +level(FlatNetwork vs VlanNetwork), and different implementations at the driver +level(LinuxNetDriver vs CiscoNetDriver). + +Managers will often provide methods for initial setup of a host or periodic +tasks to a wrapping service. + +This module provides Manager, a base class for managers. + +""" + +from cinder.db import base +from cinder import flags +from cinder import log as logging +from cinder.scheduler import api +from cinder import version + + +FLAGS = flags.FLAGS + + +LOG = logging.getLogger(__name__) + + +def periodic_task(*args, **kwargs): + """Decorator to indicate that a method is a periodic task. + + This decorator can be used in two ways: + + 1. Without arguments '@periodic_task', this will be run on every tick + of the periodic scheduler. + + 2. With arguments, @periodic_task(ticks_between_runs=N), this will be + run on every N ticks of the periodic scheduler. + """ + def decorator(f): + f._periodic_task = True + f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0) + return f + + # NOTE(sirp): The `if` is necessary to allow the decorator to be used with + # and without parens. + # + # In the 'with-parens' case (with kwargs present), this function needs to + # return a decorator function since the interpreter will invoke it like: + # + # periodic_task(*args, **kwargs)(f) + # + # In the 'without-parens' case, the original function will be passed + # in as the first argument, like: + # + # periodic_task(f) + if kwargs: + return decorator + else: + return decorator(args[0]) + + +class ManagerMeta(type): + def __init__(cls, names, bases, dict_): + """Metaclass that allows us to collect decorated periodic tasks.""" + super(ManagerMeta, cls).__init__(names, bases, dict_) + + # NOTE(sirp): if the attribute is not present then we must be the base + # class, so, go ahead an initialize it. If the attribute is present, + # then we're a subclass so make a copy of it so we don't step on our + # parent's toes. 
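+        # A shallow list copy is enough for _periodic_tasks because entries
+        # are only ever appended; _ticks_to_skip gets a real .copy() below so
+        # each class tracks its own per-task countdowns.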
+ try: + cls._periodic_tasks = cls._periodic_tasks[:] + except AttributeError: + cls._periodic_tasks = [] + + try: + cls._ticks_to_skip = cls._ticks_to_skip.copy() + except AttributeError: + cls._ticks_to_skip = {} + + for value in cls.__dict__.values(): + if getattr(value, '_periodic_task', False): + task = value + name = task.__name__ + cls._periodic_tasks.append((name, task)) + cls._ticks_to_skip[name] = task._ticks_between_runs + + +class Manager(base.Base): + __metaclass__ = ManagerMeta + + def __init__(self, host=None, db_driver=None): + if not host: + host = FLAGS.host + self.host = host + super(Manager, self).__init__(db_driver) + + def periodic_tasks(self, context, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + for task_name, task in self._periodic_tasks: + full_task_name = '.'.join([self.__class__.__name__, task_name]) + + ticks_to_skip = self._ticks_to_skip[task_name] + if ticks_to_skip > 0: + LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s" + " ticks left until next run"), locals()) + self._ticks_to_skip[task_name] -= 1 + continue + + self._ticks_to_skip[task_name] = task._ticks_between_runs + LOG.debug(_("Running periodic task %(full_task_name)s"), locals()) + + try: + task(self, context) + except Exception as e: + if raise_on_error: + raise + LOG.exception(_("Error during %(full_task_name)s: %(e)s"), + locals()) + + def init_host(self): + """Handle initialization if this is a standalone service. + + Child classes should override this method. + + """ + pass + + def service_version(self, context): + return version.version_string() + + def service_config(self, context): + config = {} + for key in FLAGS: + config[key] = FLAGS.get(key, None) + return config + + +class SchedulerDependentManager(Manager): + """Periodically send capability updates to the Scheduler services. + + Services that need to update the Scheduler of their capabilities + should derive from this class. Otherwise they can derive from + manager.Manager directly. Updates are only sent after + update_service_capabilities is called with non-None values. + + """ + + def __init__(self, host=None, db_driver=None, service_name='undefined'): + self.last_capabilities = None + self.service_name = service_name + super(SchedulerDependentManager, self).__init__(host, db_driver) + + def update_service_capabilities(self, capabilities): + """Remember these capabilities to send on next periodic update.""" + self.last_capabilities = capabilities + + @periodic_task + def _publish_service_capabilities(self, context): + """Pass data back to the scheduler at a periodic interval.""" + if self.last_capabilities: + LOG.debug(_('Notifying Schedulers of capabilities ...')) + api.update_service_capabilities(context, self.service_name, + self.host, self.last_capabilities) diff --git a/cinder/notifier/__init__.py b/cinder/notifier/__init__.py new file mode 100644 index 00000000000..482d54e4fdd --- /dev/null +++ b/cinder/notifier/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/notifier/api.py b/cinder/notifier/api.py new file mode 100644 index 00000000000..27c9e8421a5 --- /dev/null +++ b/cinder/notifier/api.py @@ -0,0 +1,133 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from cinder import flags +from cinder import utils +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder.openstack.common import importutils + + +LOG = logging.getLogger(__name__) + +notifier_opts = [ + cfg.StrOpt('default_notification_level', + default='INFO', + help='Default notification level for outgoing notifications'), + cfg.StrOpt('default_publisher_id', + default='$host', + help='Default publisher_id for outgoing notifications'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(notifier_opts) + +WARN = 'WARN' +INFO = 'INFO' +ERROR = 'ERROR' +CRITICAL = 'CRITICAL' +DEBUG = 'DEBUG' + +log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) + + +class BadPriorityException(Exception): + pass + + +def notify_decorator(name, fn): + """ decorator for notify which is used from utils.monkey_patch() + + :param name: name of the function + :param function: - object of the function + :returns: function -- decorated function + + """ + def wrapped_func(*args, **kwarg): + body = {} + body['args'] = [] + body['kwarg'] = {} + for arg in args: + body['args'].append(arg) + for key in kwarg: + body['kwarg'][key] = kwarg[key] + notify(FLAGS.default_publisher_id, + name, + FLAGS.default_notification_level, + body) + return fn(*args, **kwarg) + return wrapped_func + + +def publisher_id(service, host=None): + if not host: + host = FLAGS.host + return "%s.%s" % (service, host) + + +def notify(publisher_id, event_type, priority, payload): + """Sends a notification using the specified driver + + :param publisher_id: the source worker_type.host of the message + :param event_type: the literal type of event (ex. Instance Creation) + :param priority: patterned after the enumeration of Python logging + levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL) + :param payload: A python dictionary of attributes + + Outgoing message format includes the above parameters, and appends the + following: + + message_id + a UUID representing the id for this notification + + timestamp + the GMT timestamp the notification was sent at + + The composite message will be constructed as a dictionary of the above + attributes, which will then be sent via the transport mechanism defined + by the driver. + + Message example:: + + {'message_id': str(uuid.uuid4()), + 'publisher_id': 'compute.host1', + 'timestamp': utils.utcnow(), + 'priority': 'WARN', + 'event_type': 'compute.create_instance', + 'payload': {'instance_id': 12, ... }} + + """ + if priority not in log_levels: + raise BadPriorityException( + _('%s not in valid priorities') % priority) + + # Ensure everything is JSON serializable. 
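+    # (to_primitive recursively reduces the payload -- datetimes, objects
+    # with __dict__, etc. -- to JSON-friendly primitive types.)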
+    payload = utils.to_primitive(payload, convert_instances=True)
+
+    driver = importutils.import_module(FLAGS.notification_driver)
+    msg = dict(message_id=str(uuid.uuid4()),
+               publisher_id=publisher_id,
+               event_type=event_type,
+               priority=priority,
+               payload=payload,
+               timestamp=str(utils.utcnow()))
+    try:
+        driver.notify(msg)
+    except Exception as e:
+        LOG.exception(_("Problem '%(e)s' attempting to "
+                        "send to notification system. Payload=%(payload)s") %
+                      locals())
diff --git a/cinder/notifier/capacity_notifier.py b/cinder/notifier/capacity_notifier.py
new file mode 100644
index 00000000000..9cdbb72d013
--- /dev/null
+++ b/cinder/notifier/capacity_notifier.py
@@ -0,0 +1,81 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder import context
+from cinder import db
+from cinder import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+def notify(message):
+    """Look for specific compute manager events and interpret them
+    so as to keep the Capacity table up to date.
+
+    NOTE: the True/False return codes are only for testing.
+    """
+
+    # The event_type must start with 'compute.instance.'
+    event_type = message.get('event_type', None)
+    preamble = 'compute.instance.'
+    if not event_type or not event_type.startswith(preamble):
+        return False
+
+    # Events we're interested in end with .start and .end
+    event = event_type[len(preamble):]
+    parts = event.split('.')
+    suffix = parts[-1].lower()
+    event = event[:(-len(suffix) - 1)]
+
+    if suffix not in ['start', 'end']:
+        return False
+    started = suffix == 'start'
+    ended = suffix == 'end'
+
+    if started and event == 'create':
+        # We've already updated this stuff in the scheduler. Don't redo the
+        # work here.
+        return False
+
+    work = 1 if started else -1
+
+    # Extract the host name from the publisher id ...
+    publisher_preamble = 'compute.'
+    publisher = message.get('publisher_id', None)
+    if not publisher or not publisher.startswith(publisher_preamble):
+        return False
+    host = publisher[len(publisher_preamble):]
+
+    # If we deleted an instance, make sure we reclaim the resources.
+    # We may need to do something explicit for rebuild/migrate.
+    free_ram_mb = 0
+    free_disk_gb = 0
+    vms = 0
+    if ended and event == 'delete':
+        vms = -1
+        payload = message.get('payload', {})
+        free_ram_mb = payload.get('memory_mb', 0)
+        free_disk_gb = payload.get('disk_gb', 0)
+
+    LOG.debug("EventType=%(event_type)s -> host %(host)s: "
+              "ram %(free_ram_mb)d, disk %(free_disk_gb)d, "
+              "work %(work)d, vms %(vms)d" % locals())
+
+    db.api.compute_node_utilization_update(context.get_admin_context(), host,
+            free_ram_mb_delta=free_ram_mb, free_disk_gb_delta=free_disk_gb,
+            work_delta=work, vm_delta=vms)
+
+    return True
diff --git a/cinder/notifier/list_notifier.py b/cinder/notifier/list_notifier.py
new file mode 100644
index 00000000000..665fad26221
--- /dev/null
+++ b/cinder/notifier/list_notifier.py
@@ -0,0 +1,71 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder.openstack.common import exception as common_exception +from cinder.openstack.common import importutils + + +list_notifier_drivers_opt = cfg.MultiStrOpt('list_notifier_drivers', + default=['cinder.notifier.no_op_notifier'], + help='List of drivers to send notifications') + +FLAGS = flags.FLAGS +FLAGS.register_opt(list_notifier_drivers_opt) + +LOG = logging.getLogger(__name__) + +drivers = None + + +class ImportFailureNotifier(object): + """Noisily re-raises some exception over-and-over when notify is called.""" + + def __init__(self, exception): + self.exception = exception + + def notify(self, message): + raise self.exception + + +def _get_drivers(): + """Instantiates and returns drivers based on the flag values.""" + global drivers + if not drivers: + drivers = [] + for notification_driver in FLAGS.list_notifier_drivers: + try: + drivers.append(importutils.import_module(notification_driver)) + except ImportError as e: + drivers.append(ImportFailureNotifier(e)) + return drivers + + +def notify(message): + """Passes notification to multiple notifiers in a list.""" + for driver in _get_drivers(): + try: + driver.notify(message) + except Exception as e: + LOG.exception(_("Problem '%(e)s' attempting to send to " + "notification driver %(driver)s."), locals()) + + +def _reset_drivers(): + """Used by unit tests to reset the drivers.""" + global drivers + drivers = None diff --git a/cinder/notifier/log_notifier.py b/cinder/notifier/log_notifier.py new file mode 100644 index 00000000000..8bd78c01623 --- /dev/null +++ b/cinder/notifier/log_notifier.py @@ -0,0 +1,34 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from cinder import flags +from cinder import log as logging + + +FLAGS = flags.FLAGS + + +def notify(message): + """Notifies the recipient of the desired event given the model. 
+ Log notifications using cinder's default logging system""" + + priority = message.get('priority', + FLAGS.default_notification_level) + priority = priority.lower() + logger = logging.getLogger( + 'cinder.notification.%s' % message['event_type']) + getattr(logger, priority)(json.dumps(message)) diff --git a/cinder/notifier/no_op_notifier.py b/cinder/notifier/no_op_notifier.py new file mode 100644 index 00000000000..02971050565 --- /dev/null +++ b/cinder/notifier/no_op_notifier.py @@ -0,0 +1,19 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def notify(message): + """Notifies the recipient of the desired event given the model""" + pass diff --git a/cinder/notifier/rabbit_notifier.py b/cinder/notifier/rabbit_notifier.py new file mode 100644 index 00000000000..0b2942a2856 --- /dev/null +++ b/cinder/notifier/rabbit_notifier.py @@ -0,0 +1,46 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import cinder.context + +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder import rpc + +LOG = logging.getLogger(__name__) + +notification_topic_opt = cfg.ListOpt('notification_topics', + default=['notifications', ], + help='AMQP topic used for Cinder notifications') + +FLAGS = flags.FLAGS +FLAGS.register_opt(notification_topic_opt) + + +def notify(message): + """Sends a notification to the RabbitMQ""" + context = cinder.context.get_admin_context() + priority = message.get('priority', + FLAGS.default_notification_level) + priority = priority.lower() + for topic in FLAGS.notification_topics: + topic = '%s.%s' % (topic, priority) + try: + rpc.notify(context, topic, message) + except Exception, e: + LOG.exception(_("Could not send notification to %(topic)s. " + "Payload=%(message)s"), locals()) diff --git a/cinder/notifier/test_notifier.py b/cinder/notifier/test_notifier.py new file mode 100644 index 00000000000..6624a90aa16 --- /dev/null +++ b/cinder/notifier/test_notifier.py @@ -0,0 +1,25 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import flags + +FLAGS = flags.FLAGS + +NOTIFICATIONS = [] + + +def notify(message): + """Test notifier, stores notifications in memory for unittests.""" + NOTIFICATIONS.append(message) diff --git a/cinder/openstack/__init__.py b/cinder/openstack/__init__.py new file mode 100644 index 00000000000..0a3b98867a2 --- /dev/null +++ b/cinder/openstack/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/openstack/common/README b/cinder/openstack/common/README new file mode 100644 index 00000000000..def4a172aa2 --- /dev/null +++ b/cinder/openstack/common/README @@ -0,0 +1,13 @@ +openstack-common +---------------- + +A number of modules from openstack-common are imported into this project. + +These modules are "incubating" in openstack-common and are kept in sync +with the help of openstack-common's update.py script. See: + + http://wiki.openstack.org/CommonLibrary#Incubation + +The copy of the code should never be directly modified here. Please +always update openstack-common first and then run the script to copy +the changes across. diff --git a/cinder/openstack/common/__init__.py b/cinder/openstack/common/__init__.py new file mode 100644 index 00000000000..0a3b98867a2 --- /dev/null +++ b/cinder/openstack/common/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/openstack/common/cfg.py b/cinder/openstack/common/cfg.py new file mode 100644 index 00000000000..85aafec9e86 --- /dev/null +++ b/cinder/openstack/common/cfg.py @@ -0,0 +1,1298 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +r""" +Configuration options which may be set on the command line or in config files. + +The schema for each option is defined using the Opt sub-classes, e.g.: + +:: + + common_opts = [ + cfg.StrOpt('bind_host', + default='0.0.0.0', + help='IP address to listen on'), + cfg.IntOpt('bind_port', + default=9292, + help='Port number to listen on') + ] + +Options can be strings, integers, floats, booleans, lists or 'multi strings':: + + enabled_apis_opt = cfg.ListOpt('enabled_apis', + default=['ec2', 'osapi_compute'], + help='List of APIs to enable by default') + + DEFAULT_EXTENSIONS = [ + 'cinder.api.openstack.compute.contrib.standard_extensions' + ] + osapi_compute_extension_opt = cfg.MultiStrOpt('osapi_compute_extension', + default=DEFAULT_EXTENSIONS) + +Option schemas are registered with with the config manager at runtime, but +before the option is referenced:: + + class ExtensionManager(object): + + enabled_apis_opt = cfg.ListOpt(...) + + def __init__(self, conf): + self.conf = conf + self.conf.register_opt(enabled_apis_opt) + ... + + def _load_extensions(self): + for ext_factory in self.conf.osapi_compute_extension: + .... + +A common usage pattern is for each option schema to be defined in the module or +class which uses the option:: + + opts = ... + + def add_common_opts(conf): + conf.register_opts(opts) + + def get_bind_host(conf): + return conf.bind_host + + def get_bind_port(conf): + return conf.bind_port + +An option may optionally be made available via the command line. Such options +must registered with the config manager before the command line is parsed (for +the purposes of --help and CLI arg validation):: + + cli_opts = [ + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output'), + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output'), + ] + + def add_common_opts(conf): + conf.register_cli_opts(cli_opts) + +The config manager has a single CLI option defined by default, --config-file:: + + class ConfigOpts(object): + + config_file_opt = MultiStrOpt('config-file', + ... + + def __init__(self, ...): + ... + self.register_cli_opt(self.config_file_opt) + +Option values are parsed from any supplied config files using +openstack.common.iniparser. If none are specified, a default set is used +e.g. glance-api.conf and glance-common.conf:: + + glance-api.conf: + [DEFAULT] + bind_port = 9292 + + glance-common.conf: + [DEFAULT] + bind_host = 0.0.0.0 + +Option values in config files override those on the command line. Config files +are parsed in order, with values in later files overriding those in earlier +files. + +The parsing of CLI args and config files is initiated by invoking the config +manager e.g.:: + + conf = ConfigOpts() + conf.register_opt(BoolOpt('verbose', ...)) + conf(sys.argv[1:]) + if conf.verbose: + ... 
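+
+The value lookup order implemented by ConfigOpts._get is: an explicit
+set_override() value wins, then config file values (later files beating
+earlier ones), then command line values, then set_default() values and
+finally the opt's own default. An illustrative sketch::
+
+    conf = ConfigOpts()
+    conf.register_cli_opt(IntOpt('bind_port', default=9292))
+    conf(['--bind_port', '9999'])
+    conf.set_override('bind_port', 8080)
+    assert conf.bind_port == 8080    # the override wins over the CLI value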
+ +Options can be registered as belonging to a group:: + + rabbit_group = cfg.OptGroup(name='rabbit', + title='RabbitMQ options') + + rabbit_host_opt = cfg.StrOpt('host', + default='localhost', + help='IP/hostname to listen on'), + rabbit_port_opt = cfg.IntOpt('port', + default=5672, + help='Port number to listen on') + + def register_rabbit_opts(conf): + conf.register_group(rabbit_group) + # options can be registered under a group in either of these ways: + conf.register_opt(rabbit_host_opt, group=rabbit_group) + conf.register_opt(rabbit_port_opt, group='rabbit') + +If no group is specified, options belong to the 'DEFAULT' section of config +files:: + + glance-api.conf: + [DEFAULT] + bind_port = 9292 + ... + + [rabbit] + host = localhost + port = 5672 + use_ssl = False + userid = guest + password = guest + virtual_host = / + +Command-line options in a group are automatically prefixed with the +group name:: + + --rabbit-host localhost --rabbit-port 9999 + +Option values in the default group are referenced as attributes/properties on +the config manager; groups are also attributes on the config manager, with +attributes for each of the options associated with the group:: + + server.start(app, conf.bind_port, conf.bind_host, conf) + + self.connection = kombu.connection.BrokerConnection( + hostname=conf.rabbit.host, + port=conf.rabbit.port, + ...) + +Option values may reference other values using PEP 292 string substitution:: + + opts = [ + cfg.StrOpt('state_path', + default=os.path.join(os.path.dirname(__file__), '../'), + help='Top-level directory for maintaining cinder state'), + cfg.StrOpt('sqlite_db', + default='cinder.sqlite', + help='file name for sqlite'), + cfg.StrOpt('sql_connection', + default='sqlite:///$state_path/$sqlite_db', + help='connection string for sql database'), + ] + +Note that interpolation can be avoided by using '$$'. + +For command line utilities that dispatch to other command line utilities, the +disable_interspersed_args() method is available. If this this method is called, +then parsing e.g.:: + + script --verbose cmd --debug /tmp/mything + +will no longer return:: + + ['cmd', '/tmp/mything'] + +as the leftover arguments, but will instead return:: + + ['cmd', '--debug', '/tmp/mything'] + +i.e. argument parsing is stopped at the first non-option argument. + +Options may be declared as secret so that their values are not leaked into +log files: + + opts = [ + cfg.StrOpt('s3_store_access_key', secret=True), + cfg.StrOpt('s3_store_secret_key', secret=True), + ... 
+ ] + +""" + +import collections +import copy +import optparse +import os +import string +import sys + +from cinder.openstack.common import iniparser + + +class Error(Exception): + """Base class for cfg exceptions.""" + + def __init__(self, msg=None): + self.msg = msg + + def __str__(self): + return self.msg + + +class ArgsAlreadyParsedError(Error): + """Raised if a CLI opt is registered after parsing.""" + + def __str__(self): + ret = "arguments already parsed" + if self.msg: + ret += ": " + self.msg + return ret + + +class NoSuchOptError(Error, AttributeError): + """Raised if an opt which doesn't exist is referenced.""" + + def __init__(self, opt_name, group=None): + self.opt_name = opt_name + self.group = group + + def __str__(self): + if self.group is None: + return "no such option: %s" % self.opt_name + else: + return "no such option in group %s: %s" % (self.group.name, + self.opt_name) + + +class NoSuchGroupError(Error): + """Raised if a group which doesn't exist is referenced.""" + + def __init__(self, group_name): + self.group_name = group_name + + def __str__(self): + return "no such group: %s" % self.group_name + + +class DuplicateOptError(Error): + """Raised if multiple opts with the same name are registered.""" + + def __init__(self, opt_name): + self.opt_name = opt_name + + def __str__(self): + return "duplicate option: %s" % self.opt_name + + +class TemplateSubstitutionError(Error): + """Raised if an error occurs substituting a variable in an opt value.""" + + def __str__(self): + return "template substitution error: %s" % self.msg + + +class ConfigFilesNotFoundError(Error): + """Raised if one or more config files are not found.""" + + def __init__(self, config_files): + self.config_files = config_files + + def __str__(self): + return ('Failed to read some config files: %s' % + string.join(self.config_files, ',')) + + +class ConfigFileParseError(Error): + """Raised if there is an error parsing a config file.""" + + def __init__(self, config_file, msg): + self.config_file = config_file + self.msg = msg + + def __str__(self): + return 'Failed to parse %s: %s' % (self.config_file, self.msg) + + +class ConfigFileValueError(Error): + """Raised if a config file value does not match its opt type.""" + pass + + +def find_config_files(project=None, prog=None): + """Return a list of default configuration files. + + :param project: an optional project name + :param prog: the program name, defaulting to the basename of sys.argv[0] + + We default to two config files: [${project}.conf, ${prog}.conf] + + And we look for those config files in the following directories:: + + ~/.${project}/ + ~/ + /etc/${project}/ + /etc/ + + We return an absolute path for (at most) one of each the default config + files, for the topmost directory it exists in. + + For example, if project=foo, prog=bar and /etc/foo/foo.conf, /etc/bar.conf + and ~/.foo/bar.conf all exist, then we return ['/etc/foo/foo.conf', + '~/.foo/bar.conf'] + + If no project name is supplied, we only look for ${prog.conf}. + """ + if prog is None: + prog = os.path.basename(sys.argv[0]) + + fix_path = lambda p: os.path.abspath(os.path.expanduser(p)) + + cfg_dirs = [ + fix_path(os.path.join('~', '.' 
+ project)) if project else None, + fix_path('~'), + os.path.join('/etc', project) if project else None, + '/etc' + ] + cfg_dirs = filter(bool, cfg_dirs) + + def search_dirs(dirs, basename): + for d in dirs: + path = os.path.join(d, basename) + if os.path.exists(path): + return path + + config_files = [] + if project: + config_files.append(search_dirs(cfg_dirs, '%s.conf' % project)) + config_files.append(search_dirs(cfg_dirs, '%s.conf' % prog)) + + return filter(bool, config_files) + + +def _is_opt_registered(opts, opt): + """Check whether an opt with the same name is already registered. + + The same opt may be registered multiple times, with only the first + registration having any effect. However, it is an error to attempt + to register a different opt with the same name. + + :param opts: the set of opts already registered + :param opt: the opt to be registered + :returns: True if the opt was previously registered, False otherwise + :raises: DuplicateOptError if a naming conflict is detected + """ + if opt.dest in opts: + if opts[opt.dest]['opt'] is not opt: + raise DuplicateOptError(opt.name) + return True + else: + return False + + +class Opt(object): + + """Base class for all configuration options. + + An Opt object has no public methods, but has a number of public string + properties: + + name: + the name of the option, which may include hyphens + dest: + the (hyphen-less) ConfigOpts property which contains the option value + short: + a single character CLI option name + default: + the default value of the option + metavar: + the name shown as the argument to a CLI option in --help output + help: + an string explaining how the options value is used + """ + multi = False + + def __init__(self, name, dest=None, short=None, default=None, + metavar=None, help=None, secret=False): + """Construct an Opt object. + + The only required parameter is the option's name. However, it is + common to also supply a default and help string for all options. + + :param name: the option's name + :param dest: the name of the corresponding ConfigOpts property + :param short: a single character CLI option name + :param default: the default value of the option + :param metavar: the option argument to show in --help + :param help: an explanation of how the option is used + :param secret: true iff the value should be obfuscated in log output + """ + self.name = name + if dest is None: + self.dest = self.name.replace('-', '_') + else: + self.dest = dest + self.short = short + self.default = default + self.metavar = metavar + self.help = help + self.secret = secret + + def _get_from_config_parser(self, cparser, section): + """Retrieves the option value from a MultiConfigParser object. + + This is the method ConfigOpts uses to look up the option value from + config files. Most opt types override this method in order to perform + type appropriate conversion of the returned value. + + :param cparser: a ConfigParser object + :param section: a section name + """ + return cparser.get(section, self.dest) + + def _add_to_cli(self, parser, group=None): + """Makes the option available in the command line interface. + + This is the method ConfigOpts uses to add the opt to the CLI interface + as appropriate for the opt type. Some opt types may extend this method, + others may just extend the helper methods it uses. 
+ + :param parser: the CLI option parser + :param group: an optional OptGroup object + """ + container = self._get_optparse_container(parser, group) + kwargs = self._get_optparse_kwargs(group) + prefix = self._get_optparse_prefix('', group) + self._add_to_optparse(container, self.name, self.short, kwargs, prefix) + + def _add_to_optparse(self, container, name, short, kwargs, prefix=''): + """Add an option to an optparse parser or group. + + :param container: an optparse.OptionContainer object + :param name: the opt name + :param short: the short opt name + :param kwargs: the keyword arguments for add_option() + :param prefix: an optional prefix to prepend to the opt name + :raises: DuplicateOptError if a naming confict is detected + """ + args = ['--' + prefix + name] + if short: + args += ['-' + short] + for a in args: + if container.has_option(a): + raise DuplicateOptError(a) + container.add_option(*args, **kwargs) + + def _get_optparse_container(self, parser, group): + """Returns an optparse.OptionContainer. + + :param parser: an optparse.OptionParser + :param group: an (optional) OptGroup object + :returns: an optparse.OptionGroup if a group is given, else the parser + """ + if group is not None: + return group._get_optparse_group(parser) + else: + return parser + + def _get_optparse_kwargs(self, group, **kwargs): + """Build a dict of keyword arguments for optparse's add_option(). + + Most opt types extend this method to customize the behaviour of the + options added to optparse. + + :param group: an optional group + :param kwargs: optional keyword arguments to add to + :returns: a dict of keyword arguments + """ + dest = self.dest + if group is not None: + dest = group.name + '_' + dest + kwargs.update({ + 'dest': dest, + 'metavar': self.metavar, + 'help': self.help, + }) + return kwargs + + def _get_optparse_prefix(self, prefix, group): + """Build a prefix for the CLI option name, if required. + + CLI options in a group are prefixed with the group's name in order + to avoid conflicts between similarly named options in different + groups. + + :param prefix: an existing prefix to append to (e.g. 'no' or '') + :param group: an optional OptGroup object + :returns: a CLI option prefix including the group name, if appropriate + """ + if group is not None: + return group.name + '-' + prefix + else: + return prefix + + +class StrOpt(Opt): + """ + String opts do not have their values transformed and are returned as + str objects. + """ + pass + + +class BoolOpt(Opt): + + """ + Bool opts are set to True or False on the command line using --optname or + --noopttname respectively. + + In config files, boolean values are case insensitive and can be set using + 1/0, yes/no, true/false or on/off. 
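+
+    For example, a BoolOpt named 'verbose' registered as a CLI option gets
+    both a --verbose and a --noverbose flag; the inverse flag comes from the
+    'no' prefix built in _get_optparse_prefix().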
+ """ + + _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, + '0': False, 'no': False, 'false': False, 'off': False} + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a boolean from ConfigParser.""" + def convert_bool(v): + value = self._boolean_states.get(v.lower()) + if value is None: + raise ValueError('Unexpected boolean value %r' % v) + + return value + + return [convert_bool(v) for v in cparser.get(section, self.dest)] + + def _add_to_cli(self, parser, group=None): + """Extends the base class method to add the --nooptname option.""" + super(BoolOpt, self)._add_to_cli(parser, group) + self._add_inverse_to_optparse(parser, group) + + def _add_inverse_to_optparse(self, parser, group): + """Add the --nooptname option to the option parser.""" + container = self._get_optparse_container(parser, group) + kwargs = self._get_optparse_kwargs(group, action='store_false') + prefix = self._get_optparse_prefix('no', group) + kwargs["help"] = "The inverse of --" + self.name + self._add_to_optparse(container, self.name, None, kwargs, prefix) + + def _get_optparse_kwargs(self, group, action='store_true', **kwargs): + """Extends the base optparse keyword dict for boolean options.""" + return super(BoolOpt, + self)._get_optparse_kwargs(group, action=action, **kwargs) + + +class IntOpt(Opt): + + """Int opt values are converted to integers using the int() builtin.""" + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a integer from ConfigParser.""" + return [int(v) for v in cparser.get(section, self.dest)] + + def _get_optparse_kwargs(self, group, **kwargs): + """Extends the base optparse keyword dict for integer options.""" + return super(IntOpt, + self)._get_optparse_kwargs(group, type='int', **kwargs) + + +class FloatOpt(Opt): + + """Float opt values are converted to floats using the float() builtin.""" + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a float from ConfigParser.""" + return [float(v) for v in cparser.get(section, self.dest)] + + def _get_optparse_kwargs(self, group, **kwargs): + """Extends the base optparse keyword dict for float options.""" + return super(FloatOpt, + self)._get_optparse_kwargs(group, type='float', **kwargs) + + +class ListOpt(Opt): + + """ + List opt values are simple string values separated by commas. The opt value + is a list containing these strings. + """ + + def _get_from_config_parser(self, cparser, section): + """Retrieve the opt value as a list from ConfigParser.""" + return [v.split(',') for v in cparser.get(section, self.dest)] + + def _get_optparse_kwargs(self, group, **kwargs): + """Extends the base optparse keyword dict for list options.""" + return super(ListOpt, + self)._get_optparse_kwargs(group, + type='string', + action='callback', + callback=self._parse_list, + **kwargs) + + def _parse_list(self, option, opt, value, parser): + """An optparse callback for parsing an option value into a list.""" + setattr(parser.values, self.dest, value.split(',')) + + +class MultiStrOpt(Opt): + + """ + Multistr opt values are string opts which may be specified multiple times. + The opt value is a list containing all the string values specified. + """ + multi = True + + def _get_optparse_kwargs(self, group, **kwargs): + """Extends the base optparse keyword dict for multi str options.""" + return super(MultiStrOpt, + self)._get_optparse_kwargs(group, action='append') + + +class OptGroup(object): + + """ + Represents a group of opts. 
+
+    CLI opts in the group are automatically prefixed with the group name.
+
+    Each group corresponds to a section in config files.
+
+    An OptGroup object has no public methods, but has a number of public string
+    properties:
+
+      name:
+        the name of the group
+      title:
+        the group title as displayed in --help
+      help:
+        the group description as displayed in --help
+    """
+
+    def __init__(self, name, title=None, help=None):
+        """Constructs an OptGroup object.
+
+        :param name: the group name
+        :param title: the group title for --help
+        :param help: the group description for --help
+        """
+        self.name = name
+        if title is None:
+            self.title = "%s options" % name
+        else:
+            self.title = title
+        self.help = help
+
+        self._opts = {}  # dict of dicts of (opt:, override:, default:)
+        self._optparse_group = None
+
+    def _register_opt(self, opt):
+        """Add an opt to this group.
+
+        :param opt: an Opt object
+        :returns: False if previously registered, True otherwise
+        :raises: DuplicateOptError if a naming conflict is detected
+        """
+        if _is_opt_registered(self._opts, opt):
+            return False
+
+        self._opts[opt.dest] = {'opt': opt, 'override': None, 'default': None}
+
+        return True
+
+    def _get_optparse_group(self, parser):
+        """Build an optparse.OptionGroup for this group."""
+        if self._optparse_group is None:
+            self._optparse_group = optparse.OptionGroup(parser, self.title,
+                                                        self.help)
+        return self._optparse_group
+
+
+class ParseError(iniparser.ParseError):
+    def __init__(self, msg, lineno, line, filename):
+        super(ParseError, self).__init__(msg, lineno, line)
+        self.filename = filename
+
+    def __str__(self):
+        return 'at %s:%d, %s: %r' % (self.filename, self.lineno,
+                                     self.msg, self.line)
+
+
+class ConfigParser(iniparser.BaseParser):
+    def __init__(self, filename, sections):
+        super(ConfigParser, self).__init__()
+        self.filename = filename
+        self.sections = sections
+        self.section = None
+
+    def parse(self):
+        with open(self.filename) as f:
+            return super(ConfigParser, self).parse(f)
+
+    def new_section(self, section):
+        self.section = section
+        self.sections.setdefault(self.section, {})
+
+    def assignment(self, key, value):
+        if not self.section:
+            raise self.error_no_section()
+
+        self.sections[self.section].setdefault(key, [])
+        self.sections[self.section][key].append('\n'.join(value))
+
+    def parse_exc(self, msg, lineno, line=None):
+        return ParseError(msg, lineno, line, self.filename)
+
+    def error_no_section(self):
+        return self.parse_exc('Section must be started before assignment',
+                              self.lineno)
+
+
+class MultiConfigParser(object):
+    def __init__(self):
+        self.sections = {}
+
+    def read(self, config_files):
+        read_ok = []
+
+        for filename in config_files:
+            parser = ConfigParser(filename, self.sections)
+
+            try:
+                parser.parse()
+            except IOError:
+                continue
+
+            read_ok.append(filename)
+
+        return read_ok
+
+    def get(self, section, name):
+        return self.sections[section][name]
+
+
+class ConfigOpts(collections.Mapping):
+
+    """
+    Config options which may be set on the command line or in config files.
+
+    ConfigOpts is a configuration option manager with APIs for registering
+    option schemas, grouping options, parsing option values and retrieving
+    the values of options.
+    """
+
+    def __init__(self,
+                 project=None,
+                 prog=None,
+                 version=None,
+                 usage=None,
+                 default_config_files=None):
+        """Construct a ConfigOpts object.
+
+        Automatically registers the --config-file option with either a supplied
+        list of default config files, or a list from find_config_files().
+ + :param project: the toplevel project name, used to locate config files + :param prog: the name of the program (defaults to sys.argv[0] basename) + :param version: the program version (for --version) + :param usage: a usage string (%prog will be expanded) + :param default_config_files: config files to use by default + """ + if prog is None: + prog = os.path.basename(sys.argv[0]) + + if default_config_files is None: + default_config_files = find_config_files(project, prog) + + self.project = project + self.prog = prog + self.version = version + self.usage = usage + self.default_config_files = default_config_files + + self._opts = {} # dict of dicts of (opt:, override:, default:) + self._groups = {} + + self._args = None + self._cli_values = {} + + self._oparser = optparse.OptionParser(prog=self.prog, + version=self.version, + usage=self.usage) + self._cparser = None + + self.register_cli_opt( + MultiStrOpt('config-file', + default=self.default_config_files, + metavar='PATH', + help='Path to a config file to use. Multiple config ' + 'files can be specified, with values in later ' + 'files taking precedence. The default files used ' + 'are: %s' % (self.default_config_files, ))) + + def __call__(self, args=None): + """Parse command line arguments and config files. + + Calling a ConfigOpts object causes the supplied command line arguments + and config files to be parsed, causing opt values to be made available + as attributes of the object. + + The object may be called multiple times, each time causing the previous + set of values to be overwritten. + + :params args: command line arguments (defaults to sys.argv[1:]) + :returns: the list of arguments left over after parsing options + :raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError + """ + self.reset() + + self._args = args + + (values, args) = self._oparser.parse_args(self._args) + + self._cli_values = vars(values) + + if self.config_file: + self._parse_config_files(self.config_file) + + return args + + def __getattr__(self, name): + """Look up an option value and perform string substitution. + + :param name: the opt name (or 'dest', more precisely) + :returns: the option value (after string subsititution) or a GroupAttr + :raises: NoSuchOptError,ConfigFileValueError,TemplateSubstitutionError + """ + return self._substitute(self._get(name)) + + def __getitem__(self, key): + """Look up an option value and perform string substitution.""" + return self.__getattr__(key) + + def __contains__(self, key): + """Return True if key is the name of a registered opt or group.""" + return key in self._opts or key in self._groups + + def __iter__(self): + """Iterate over all registered opt and group names.""" + for key in self._opts.keys() + self._groups.keys(): + yield key + + def __len__(self): + """Return the number of options and option groups.""" + return len(self._opts) + len(self._groups) + + def reset(self): + """Reset the state of the object to before it was called.""" + self._args = None + self._cli_values = None + self._cparser = None + + def register_opt(self, opt, group=None): + """Register an option schema. + + Registering an option schema makes any option value which is previously + or subsequently parsed from the command line or config files available + as an attribute of this object. 
+
+    def register_opt(self, opt, group=None):
+        """Register an option schema.
+
+        Registering an option schema makes any option value which is
+        previously or subsequently parsed from the command line or config
+        files available as an attribute of this object.
+
+        :param opt: an instance of an Opt sub-class
+        :param group: an optional OptGroup object or group name
+        :return: False if the opt was already registered, True otherwise
+        :raises: DuplicateOptError
+        """
+        if group is not None:
+            return self._get_group(group)._register_opt(opt)
+
+        if _is_opt_registered(self._opts, opt):
+            return False
+
+        self._opts[opt.dest] = {'opt': opt, 'override': None, 'default': None}
+
+        return True
+
+    def register_opts(self, opts, group=None):
+        """Register multiple option schemas at once."""
+        for opt in opts:
+            self.register_opt(opt, group)
+
+    def register_cli_opt(self, opt, group=None):
+        """Register a CLI option schema.
+
+        CLI option schemas must be registered before the command line and
+        config files are parsed. This is to ensure that all CLI options are
+        shown in --help and option validation works as expected.
+
+        :param opt: an instance of an Opt sub-class
+        :param group: an optional OptGroup object or group name
+        :return: False if the opt was already registered, True otherwise
+        :raises: DuplicateOptError, ArgsAlreadyParsedError
+        """
+        if self._args is not None:
+            raise ArgsAlreadyParsedError("cannot register CLI option")
+
+        if not self.register_opt(opt, group):
+            return False
+
+        if group is not None:
+            group = self._get_group(group)
+
+        opt._add_to_cli(self._oparser, group)
+
+        return True
+
+    def register_cli_opts(self, opts, group=None):
+        """Register multiple CLI option schemas at once."""
+        for opt in opts:
+            self.register_cli_opt(opt, group)
+
+    def register_group(self, group):
+        """Register an option group.
+
+        An option group must be registered before options can be registered
+        with the group.
+
+        :param group: an OptGroup object
+        """
+        if group.name in self._groups:
+            return
+
+        self._groups[group.name] = copy.copy(group)
+
+    def set_override(self, name, override, group=None):
+        """Override an opt value.
+
+        Override the command line, config file and default values of a
+        given option.
+
+        :param name: the name/dest of the opt
+        :param override: the override value
+        :param group: an optional OptGroup object or group name
+        :raises: NoSuchOptError, NoSuchGroupError
+        """
+        opt_info = self._get_opt_info(name, group)
+        opt_info['override'] = override
+
+    def set_default(self, name, default, group=None):
+        """Override an opt's default value.
+
+        Override the default value of a given option. A command line or
+        config file value will still take precedence over this default.
+
+        :param name: the name/dest of the opt
+        :param default: the default value
+        :param group: an optional OptGroup object or group name
+        :raises: NoSuchOptError, NoSuchGroupError
+        """
+        opt_info = self._get_opt_info(name, group)
+        opt_info['default'] = default
+
+    def disable_interspersed_args(self):
+        """Set parsing to stop on the first non-option.
+
+        If this method is called, then parsing e.g.
+
+          script --verbose cmd --debug /tmp/mything
+
+        will no longer return:
+
+          ['cmd', '/tmp/mything']
+
+        as the leftover arguments, but will instead return:
+
+          ['cmd', '--debug', '/tmp/mything']
+
+        i.e. argument parsing is stopped at the first non-option argument.
+        """
+        self._oparser.disable_interspersed_args()
+
+    def enable_interspersed_args(self):
+        """Set parsing to not stop on the first non-option.
+
+        This is the default behaviour."""
+        self._oparser.enable_interspersed_args()
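
The grouping and override hooks just defined compose as in the following sketch; the group and option names are hypothetical:

    from cinder.openstack.common.cfg import ConfigOpts, IntOpt, OptGroup, StrOpt

    conf = ConfigOpts(project='example', default_config_files=[])

    conf.register_group(OptGroup(name='rabbit', title='RabbitMQ options'))
    conf.register_opt(StrOpt('host', default='localhost'), group='rabbit')
    conf.register_opt(IntOpt('port', default=5672), group='rabbit')
    conf([])

    print(conf.rabbit.host)    # 'localhost', resolved through a GroupAttr

    conf.set_override('host', 'mq1.example.org', group='rabbit')
    print(conf.rabbit.host)    # the override beats CLI, config file and default

    conf.set_default('port', 5671, group='rabbit')
    print(conf.rabbit.port)    # 5671, since no CLI or config file value was set
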
+
+    def log_opt_values(self, logger, lvl):
+        """Log the value of all registered opts.
+
+        It's often useful for an app to log its configuration to a log file at
+        startup for debugging. This method dumps the entire config state to
+        the supplied logger at a given log level.
+
+        :param logger: a logging.Logger object
+        :param lvl: the log level (e.g. logging.DEBUG) arg to logger.log()
+        """
+        logger.log(lvl, "*" * 80)
+        logger.log(lvl, "Configuration options gathered from:")
+        logger.log(lvl, "command line args: %s", self._args)
+        logger.log(lvl, "config files: %s", self.config_file)
+        logger.log(lvl, "=" * 80)
+
+        def _sanitize(opt, value):
+            """Obfuscate values of options declared secret"""
+            return value if not opt.secret else '*' * len(str(value))
+
+        for opt_name in sorted(self._opts):
+            opt = self._get_opt_info(opt_name)['opt']
+            logger.log(lvl, "%-30s = %s", opt_name,
+                       _sanitize(opt, getattr(self, opt_name)))
+
+        for group_name in self._groups:
+            group_attr = self.GroupAttr(self, self._get_group(group_name))
+            for opt_name in sorted(self._groups[group_name]._opts):
+                opt = self._get_opt_info(opt_name, group_name)['opt']
+                logger.log(lvl, "%-30s = %s",
+                           "%s.%s" % (group_name, opt_name),
+                           _sanitize(opt, getattr(group_attr, opt_name)))
+
+        logger.log(lvl, "*" * 80)
+
+    def print_usage(self, file=None):
+        """Print the usage message for the current program."""
+        self._oparser.print_usage(file)
+
+    def print_help(self, file=None):
+        """Print the help message for the current program."""
+        self._oparser.print_help(file)
+
+    def _get(self, name, group=None):
+        """Look up an option value.
+
+        :param name: the opt name (or 'dest', more precisely)
+        :param group: an OptGroup
+        :returns: the option value, or a GroupAttr object
+        :raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError,
+                 TemplateSubstitutionError
+        """
+        if group is None and name in self._groups:
+            return self.GroupAttr(self, self._get_group(name))
+
+        info = self._get_opt_info(name, group)
+        default, opt, override = map(lambda k: info[k], sorted(info.keys()))
+
+        if override is not None:
+            return override
+
+        values = []
+        if self._cparser is not None:
+            section = group.name if group is not None else 'DEFAULT'
+            try:
+                value = opt._get_from_config_parser(self._cparser, section)
+            except KeyError:
+                pass
+            except ValueError as ve:
+                raise ConfigFileValueError(str(ve))
+            else:
+                if not opt.multi:
+                    # No need to continue since the last value wins
+                    return value[-1]
+                values.extend(value)
+
+        name = name if group is None else group.name + '_' + name
+        value = self._cli_values.get(name)
+        if value is not None:
+            if not opt.multi:
+                return value
+
+            return value + values
+
+        if values:
+            return values
+
+        if default is not None:
+            return default
+
+        return opt.default
+
+    def _substitute(self, value):
+        """Perform string template substitution.
+
+        Substitute any template variables (e.g. $foo, ${bar}) in the supplied
+        string value(s) with opt values.
+
+        :param value: the string value, or list of string values
+        :returns: the substituted string(s)
+        """
+        if isinstance(value, list):
+            return [self._substitute(i) for i in value]
+        elif isinstance(value, str):
+            tmpl = string.Template(value)
+            return tmpl.safe_substitute(self.StrSubWrapper(self))
+        else:
+            return value
+
+    def _get_group(self, group_or_name):
+        """Looks up an OptGroup object.
+
+        Helper function to return an OptGroup given a parameter which can
+        either be the group's name or an OptGroup object.
+
+        The OptGroup object returned is from the internal dict of OptGroup
+        objects, which will be a copy of any OptGroup object that users of
+        the API have access to.
+ + :param group_or_name: the group's name or the OptGroup object itself + :raises: NoSuchGroupError + """ + if isinstance(group_or_name, OptGroup): + group_name = group_or_name.name + else: + group_name = group_or_name + + if not group_name in self._groups: + raise NoSuchGroupError(group_name) + + return self._groups[group_name] + + def _get_opt_info(self, opt_name, group=None): + """Return the (opt, override, default) dict for an opt. + + :param opt_name: an opt name/dest + :param group: an optional group name or OptGroup object + :raises: NoSuchOptError, NoSuchGroupError + """ + if group is None: + opts = self._opts + else: + group = self._get_group(group) + opts = group._opts + + if not opt_name in opts: + raise NoSuchOptError(opt_name, group) + + return opts[opt_name] + + def _parse_config_files(self, config_files): + """Parse the supplied configuration files. + + :raises: ConfigFilesNotFoundError, ConfigFileParseError + """ + self._cparser = MultiConfigParser() + + try: + read_ok = self._cparser.read(config_files) + except iniparser.ParseError as pe: + raise ConfigFileParseError(pe.filename, str(pe)) + + if read_ok != config_files: + not_read_ok = filter(lambda f: f not in read_ok, config_files) + raise ConfigFilesNotFoundError(not_read_ok) + + class GroupAttr(collections.Mapping): + + """ + A helper class representing the option values of a group as a mapping + and attributes. + """ + + def __init__(self, conf, group): + """Construct a GroupAttr object. + + :param conf: a ConfigOpts object + :param group: an OptGroup object + """ + self.conf = conf + self.group = group + + def __getattr__(self, name): + """Look up an option value and perform template substitution.""" + return self.conf._substitute(self.conf._get(name, self.group)) + + def __getitem__(self, key): + """Look up an option value and perform string substitution.""" + return self.__getattr__(key) + + def __contains__(self, key): + """Return True if key is the name of a registered opt or group.""" + return key in self.group._opts + + def __iter__(self): + """Iterate over all registered opt and group names.""" + for key in self.group._opts.keys(): + yield key + + def __len__(self): + """Return the number of options and option groups.""" + return len(self.group._opts) + + class StrSubWrapper(object): + + """ + A helper class exposing opt values as a dict for string substitution. + """ + + def __init__(self, conf): + """Construct a StrSubWrapper object. + + :param conf: a ConfigOpts object + """ + self.conf = conf + + def __getitem__(self, key): + """Look up an opt value from the ConfigOpts object. + + :param key: an opt name + :returns: an opt value + :raises: TemplateSubstitutionError if attribute is a group + """ + value = getattr(self.conf, key) + if isinstance(value, self.conf.GroupAttr): + raise TemplateSubstitutionError( + 'substituting group %s not supported' % key) + return value + + +class CommonConfigOpts(ConfigOpts): + + DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" + DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + + common_cli_opts = [ + BoolOpt('debug', + short='d', + default=False, + help='Print debugging output'), + BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output'), + ] + + logging_cli_opts = [ + StrOpt('log-config', + metavar='PATH', + help='If this option is specified, the logging configuration ' + 'file specified is used and overrides any other logging ' + 'options specified. 
Please see the Python logging module ' + 'documentation for details on logging configuration ' + 'files.'), + StrOpt('log-format', + default=DEFAULT_LOG_FORMAT, + metavar='FORMAT', + help='A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'Default: %default'), + StrOpt('log-date-format', + default=DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %(asctime)s in log records. ' + 'Default: %default'), + StrOpt('log-file', + metavar='PATH', + help='(Optional) Name of log file to output to. ' + 'If not set, logging will go to stdout.'), + StrOpt('log-dir', + help='(Optional) The directory to keep log files in ' + '(will be prepended to --logfile)'), + BoolOpt('use-syslog', + default=False, + help='Use syslog for logging.'), + StrOpt('syslog-log-facility', + default='LOG_USER', + help='syslog facility to receive log lines') + ] + + def __init__(self, **kwargs): + super(CommonConfigOpts, self).__init__(**kwargs) + self.register_cli_opts(self.common_cli_opts) + self.register_cli_opts(self.logging_cli_opts) diff --git a/cinder/openstack/common/exception.py b/cinder/openstack/common/exception.py new file mode 100644 index 00000000000..ba32da550b0 --- /dev/null +++ b/cinder/openstack/common/exception.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exceptions common to OpenStack projects +""" + +import logging + + +class ProcessExecutionError(IOError): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, + description=None): + if description is None: + description = "Unexpected error while running command." + if exit_code is None: + exit_code = '-' + message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % ( + description, cmd, exit_code, stdout, stderr) + IOError.__init__(self, message) + + +class Error(Exception): + def __init__(self, message=None): + super(Error, self).__init__(message) + + +class ApiError(Error): + def __init__(self, message='Unknown', code='Unknown'): + self.message = message + self.code = code + super(ApiError, self).__init__('%s: %s' % (code, message)) + + +class NotFound(Error): + pass + + +class UnknownScheme(Error): + + msg = "Unknown scheme '%s' found in URI" + + def __init__(self, scheme): + msg = self.__class__.msg % scheme + super(UnknownScheme, self).__init__(msg) + + +class BadStoreUri(Error): + + msg = "The Store URI %s was malformed. 
Reason: %s" + + def __init__(self, uri, reason): + msg = self.__class__.msg % (uri, reason) + super(BadStoreUri, self).__init__(msg) + + +class Duplicate(Error): + pass + + +class NotAuthorized(Error): + pass + + +class NotEmpty(Error): + pass + + +class Invalid(Error): + pass + + +class BadInputError(Exception): + """Error resulting from a client sending bad input to a server""" + pass + + +class MissingArgumentError(Error): + pass + + +class DatabaseMigrationError(Error): + pass + + +class ClientConnectionError(Exception): + """Error resulting from a client connecting to a server""" + pass + + +def wrap_exception(f): + def _wrap(*args, **kw): + try: + return f(*args, **kw) + except Exception, e: + if not isinstance(e, Error): + #exc_type, exc_value, exc_traceback = sys.exc_info() + logging.exception('Uncaught exception') + #logging.error(traceback.extract_stack(exc_traceback)) + raise Error(str(e)) + raise + _wrap.func_name = f.func_name + return _wrap + + +class OpenstackException(Exception): + """ + Base Exception + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. + """ + message = "An unknown exception occurred" + + def __init__(self, **kwargs): + try: + self._error_string = self.message % kwargs + + except Exception: + # at least get the core message out if something happened + self._error_string = self.message + + def __str__(self): + return self._error_string + + +class MalformedRequestBody(OpenstackException): + message = "Malformed message body: %(reason)s" + + +class InvalidContentType(OpenstackException): + message = "Invalid content type %(content_type)s" diff --git a/cinder/openstack/common/importutils.py b/cinder/openstack/common/importutils.py new file mode 100644 index 00000000000..9c0815c72ea --- /dev/null +++ b/cinder/openstack/common/importutils.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Import related utilities and helper functions. 
+""" + +import sys + +from cinder.openstack.common import exception + + +def import_class(import_str): + """Returns a class from a string including module and class""" + mod_str, _sep, class_str = import_str.rpartition('.') + try: + __import__(mod_str) + return getattr(sys.modules[mod_str], class_str) + except (ImportError, ValueError, AttributeError): + raise exception.NotFound('Class %s cannot be found' % class_str) + + +def import_object(import_str, *args, **kwargs): + """Import a class and return an instance of it.""" + return import_class(import_str)(*args, **kwargs) + + +def import_module(import_str): + """Import a module.""" + __import__(import_str) + return sys.modules[import_str] diff --git a/cinder/openstack/common/iniparser.py b/cinder/openstack/common/iniparser.py new file mode 100644 index 00000000000..53ca0233436 --- /dev/null +++ b/cinder/openstack/common/iniparser.py @@ -0,0 +1,126 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class ParseError(Exception): + def __init__(self, message, lineno, line): + self.msg = message + self.line = line + self.lineno = lineno + + def __str__(self): + return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line) + + +class BaseParser(object): + lineno = 0 + parse_exc = ParseError + + def _assignment(self, key, value): + self.assignment(key, value) + return None, [] + + def _get_section(self, line): + if line[-1] != ']': + return self.error_no_section_end_bracket(line) + if len(line) <= 2: + return self.error_no_section_name(line) + + return line[1:-1] + + def _split_key_value(self, line): + colon = line.find(':') + equal = line.find('=') + if colon < 0 and equal < 0: + return self.error_invalid_assignment(line) + + if colon < 0 or (equal >= 0 and equal < colon): + key, value = line[:equal], line[equal + 1:] + else: + key, value = line[:colon], line[colon + 1:] + + return key.strip(), [value.strip()] + + def parse(self, lineiter): + key = None + value = [] + + for line in lineiter: + self.lineno += 1 + + line = line.rstrip() + if not line: + # Blank line, ends multi-line values + if key: + key, value = self._assignment(key, value) + continue + elif line[0] in (' ', '\t'): + # Continuation of previous assignment + if key is None: + self.error_unexpected_continuation(line) + else: + value.append(line.lstrip()) + continue + + if key: + # Flush previous assignment, if any + key, value = self._assignment(key, value) + + if line[0] == '[': + # Section start + section = self._get_section(line) + if section: + self.new_section(section) + elif line[0] in '#;': + self.comment(line[1:].lstrip()) + else: + key, value = self._split_key_value(line) + if not key: + return self.error_empty_key(line) + + if key: + # Flush previous assignment, if any + self._assignment(key, value) + + def assignment(self, key, value): + """Called when a full assignment is parsed""" + raise NotImplementedError() + + def new_section(self, section): + """Called when a new section is 
started""" + raise NotImplementedError() + + def comment(self, comment): + """Called when a comment is parsed""" + pass + + def error_invalid_assignment(self, line): + raise self.parse_exc("No ':' or '=' found in assignment", + self.lineno, line) + + def error_empty_key(self, line): + raise self.parse_exc('Key cannot be empty', self.lineno, line) + + def error_unexpected_continuation(self, line): + raise self.parse_exc('Unexpected continuation line', + self.lineno, line) + + def error_no_section_end_bracket(self, line): + raise self.parse_exc('Invalid section (must end with ])', + self.lineno, line) + + def error_no_section_name(self, line): + raise self.parse_exc('Empty section name', self.lineno, line) diff --git a/cinder/openstack/common/local.py b/cinder/openstack/common/local.py new file mode 100644 index 00000000000..19d962732c1 --- /dev/null +++ b/cinder/openstack/common/local.py @@ -0,0 +1,37 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Greenthread local storage of variables using weak references""" + +import weakref + +from eventlet import corolocal + + +class WeakLocal(corolocal.local): + def __getattribute__(self, attr): + rval = corolocal.local.__getattribute__(self, attr) + if rval: + rval = rval() + return rval + + def __setattr__(self, attr, value): + value = weakref.ref(value) + return corolocal.local.__setattr__(self, attr, value) + + +store = WeakLocal() diff --git a/cinder/policy.py b/cinder/policy.py new file mode 100644 index 00000000000..646530385dc --- /dev/null +++ b/cinder/policy.py @@ -0,0 +1,90 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Policy Engine For Cinder""" + +from cinder.common import policy +from cinder import exception +from cinder import flags +from cinder.openstack.common import cfg +from cinder import utils + + +policy_opts = [ + cfg.StrOpt('policy_file', + default='policy.json', + help=_('JSON file representing policy')), + cfg.StrOpt('policy_default_rule', + default='default', + help=_('Rule checked when requested rule is not found')), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(policy_opts) + +_POLICY_PATH = None +_POLICY_CACHE = {} + + +def reset(): + global _POLICY_PATH + global _POLICY_CACHE + _POLICY_PATH = None + _POLICY_CACHE = {} + policy.reset() + + +def init(): + global _POLICY_PATH + global _POLICY_CACHE + if not _POLICY_PATH: + _POLICY_PATH = utils.find_config(FLAGS.policy_file) + utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE, + reload_func=_set_brain) + + +def _set_brain(data): + default_rule = FLAGS.policy_default_rule + policy.set_brain(policy.HttpBrain.load_json(data, default_rule)) + + +def enforce(context, action, target): + """Verifies that the action is valid on the target in this context. + + :param context: cinder context + :param action: string representing the action to be checked + this should be colon separated for clarity. + i.e. ``compute:create_instance``, + ``compute:attach_volume``, + ``volume:attach_volume`` + + :param object: dictionary representing the object of the action + for object creation this should be a dictionary representing the + location of the object e.g. ``{'project_id': context.project_id}`` + + :raises cinder.exception.PolicyNotAllowed: if verification fails. + + """ + init() + + match_list = ('rule:%s' % action,) + credentials = context.to_dict() + + try: + policy.enforce(match_list, target, credentials) + except policy.NotAuthorized: + raise exception.PolicyNotAuthorized(action=action) diff --git a/cinder/quota.py b/cinder/quota.py new file mode 100644 index 00000000000..2df6c89a4f2 --- /dev/null +++ b/cinder/quota.py @@ -0,0 +1,234 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Quotas for instances, volumes, and floating ips.""" + +from cinder import db +from cinder.openstack.common import cfg +from cinder import flags + + +quota_opts = [ + cfg.IntOpt('quota_instances', + default=10, + help='number of instances allowed per project'), + cfg.IntOpt('quota_cores', + default=20, + help='number of instance cores allowed per project'), + cfg.IntOpt('quota_ram', + default=50 * 1024, + help='megabytes of instance ram allowed per project'), + cfg.IntOpt('quota_volumes', + default=10, + help='number of volumes allowed per project'), + cfg.IntOpt('quota_gigabytes', + default=1000, + help='number of volume gigabytes allowed per project'), + cfg.IntOpt('quota_floating_ips', + default=10, + help='number of floating ips allowed per project'), + cfg.IntOpt('quota_metadata_items', + default=128, + help='number of metadata items allowed per instance'), + cfg.IntOpt('quota_injected_files', + default=5, + help='number of injected files allowed'), + cfg.IntOpt('quota_injected_file_content_bytes', + default=10 * 1024, + help='number of bytes allowed per injected file'), + cfg.IntOpt('quota_injected_file_path_bytes', + default=255, + help='number of bytes allowed per injected file path'), + cfg.IntOpt('quota_security_groups', + default=10, + help='number of security groups per project'), + cfg.IntOpt('quota_security_group_rules', + default=20, + help='number of security rules per security group'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(quota_opts) + + +quota_resources = ['metadata_items', 'injected_file_content_bytes', + 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances', + 'injected_files', 'cores', 'security_groups', 'security_group_rules'] + + +def _get_default_quotas(): + defaults = { + 'instances': FLAGS.quota_instances, + 'cores': FLAGS.quota_cores, + 'ram': FLAGS.quota_ram, + 'volumes': FLAGS.quota_volumes, + 'gigabytes': FLAGS.quota_gigabytes, + 'floating_ips': FLAGS.quota_floating_ips, + 'metadata_items': FLAGS.quota_metadata_items, + 'injected_files': FLAGS.quota_injected_files, + 'injected_file_content_bytes': + FLAGS.quota_injected_file_content_bytes, + 'security_groups': FLAGS.quota_security_groups, + 'security_group_rules': FLAGS.quota_security_group_rules, + } + # -1 in the quota flags means unlimited + return defaults + + +def get_class_quotas(context, quota_class, defaults=None): + """Update defaults with the quota class values.""" + + if not defaults: + defaults = _get_default_quotas() + + quota = db.quota_class_get_all_by_name(context, quota_class) + for key in defaults.keys(): + if key in quota: + defaults[key] = quota[key] + + return defaults + + +def get_project_quotas(context, project_id): + defaults = _get_default_quotas() + if context.quota_class: + get_class_quotas(context, context.quota_class, defaults) + quota = db.quota_get_all_by_project(context, project_id) + for key in defaults.keys(): + if key in quota: + defaults[key] = quota[key] + return defaults + + +def _get_request_allotment(requested, used, quota): + if quota == -1: + return requested + return quota - used + + +def allowed_instances(context, requested_instances, instance_type): + """Check quota and return min(requested_instances, allowed_instances).""" + project_id = context.project_id + context = context.elevated() + requested_cores = requested_instances * instance_type['vcpus'] + requested_ram = requested_instances * instance_type['memory_mb'] + usage = db.instance_data_get_for_project(context, project_id) + used_instances, used_cores, used_ram = usage + quota = 
get_project_quotas(context, project_id) + allowed_instances = _get_request_allotment(requested_instances, + used_instances, + quota['instances']) + allowed_cores = _get_request_allotment(requested_cores, used_cores, + quota['cores']) + allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram']) + if instance_type['vcpus']: + allowed_instances = min(allowed_instances, + allowed_cores // instance_type['vcpus']) + if instance_type['memory_mb']: + allowed_instances = min(allowed_instances, + allowed_ram // instance_type['memory_mb']) + + return min(requested_instances, allowed_instances) + + +def allowed_volumes(context, requested_volumes, size): + """Check quota and return min(requested_volumes, allowed_volumes).""" + project_id = context.project_id + context = context.elevated() + size = int(size) + requested_gigabytes = requested_volumes * size + used_volumes, used_gigabytes = db.volume_data_get_for_project(context, + project_id) + quota = get_project_quotas(context, project_id) + allowed_volumes = _get_request_allotment(requested_volumes, used_volumes, + quota['volumes']) + allowed_gigabytes = _get_request_allotment(requested_gigabytes, + used_gigabytes, + quota['gigabytes']) + if size != 0: + allowed_volumes = min(allowed_volumes, + int(allowed_gigabytes // size)) + return min(requested_volumes, allowed_volumes) + + +def allowed_floating_ips(context, requested_floating_ips): + """Check quota and return min(requested, allowed) floating ips.""" + project_id = context.project_id + context = context.elevated() + used_floating_ips = db.floating_ip_count_by_project(context, project_id) + quota = get_project_quotas(context, project_id) + allowed_floating_ips = _get_request_allotment(requested_floating_ips, + used_floating_ips, + quota['floating_ips']) + return min(requested_floating_ips, allowed_floating_ips) + + +def allowed_security_groups(context, requested_security_groups): + """Check quota and return min(requested, allowed) security groups.""" + project_id = context.project_id + context = context.elevated() + used_sec_groups = db.security_group_count_by_project(context, project_id) + quota = get_project_quotas(context, project_id) + allowed_sec_groups = _get_request_allotment(requested_security_groups, + used_sec_groups, + quota['security_groups']) + return min(requested_security_groups, allowed_sec_groups) + + +def allowed_security_group_rules(context, security_group_id, + requested_rules): + """Check quota and return min(requested, allowed) sec group rules.""" + project_id = context.project_id + context = context.elevated() + used_rules = db.security_group_rule_count_by_group(context, + security_group_id) + quota = get_project_quotas(context, project_id) + allowed_rules = _get_request_allotment(requested_rules, + used_rules, + quota['security_group_rules']) + return min(requested_rules, allowed_rules) + + +def _calculate_simple_quota(context, resource, requested): + """Check quota for resource; return min(requested, allowed).""" + quota = get_project_quotas(context, context.project_id) + allowed = _get_request_allotment(requested, 0, quota[resource]) + return min(requested, allowed) + + +def allowed_metadata_items(context, requested_metadata_items): + """Return the number of metadata items allowed.""" + return _calculate_simple_quota(context, 'metadata_items', + requested_metadata_items) + + +def allowed_injected_files(context, requested_injected_files): + """Return the number of injected files allowed.""" + return _calculate_simple_quota(context, 'injected_files', + 
requested_injected_files) + + +def allowed_injected_file_content_bytes(context, requested_bytes): + """Return the number of bytes allowed per injected file content.""" + resource = 'injected_file_content_bytes' + return _calculate_simple_quota(context, resource, requested_bytes) + + +def allowed_injected_file_path_bytes(context): + """Return the number of bytes allowed in an injected file path.""" + return FLAGS.quota_injected_file_path_bytes diff --git a/cinder/rootwrap/__init__.py b/cinder/rootwrap/__init__.py new file mode 100755 index 00000000000..671d3c173e2 --- /dev/null +++ b/cinder/rootwrap/__init__.py @@ -0,0 +1,16 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/rootwrap/filters.py b/cinder/rootwrap/filters.py new file mode 100755 index 00000000000..a51ecae3dd9 --- /dev/null +++ b/cinder/rootwrap/filters.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
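
The allotment arithmetic used by allowed_volumes() above reduces to a few lines; here is a worked example with made-up numbers (quota of 10 volumes/1000 GB, 7 volumes and 900 GB already used, five 20 GB volumes requested):

    # Standalone rework of the quota math above; all figures are invented.
    def _get_request_allotment(requested, used, quota):
        if quota == -1:      # -1 means unlimited
            return requested
        return quota - used

    quota = {'volumes': 10, 'gigabytes': 1000}
    used_volumes, used_gigabytes = 7, 900
    requested_volumes, size = 5, 20

    allowed_volumes = _get_request_allotment(
        requested_volumes, used_volumes, quota['volumes'])      # 10 - 7 = 3
    allowed_gigabytes = _get_request_allotment(
        requested_volumes * size, used_gigabytes, quota['gigabytes'])  # 100
    allowed_volumes = min(allowed_volumes,
                          int(allowed_gigabytes // size))       # min(3, 5)
    print(min(requested_volumes, allowed_volumes))              # 3
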
+ + +import os +import re + + +class CommandFilter(object): + """Command filter only checking that the 1st argument matches exec_path""" + + def __init__(self, exec_path, run_as, *args): + self.exec_path = exec_path + self.run_as = run_as + self.args = args + + def match(self, userargs): + """Only check that the first argument (command) matches exec_path""" + if (os.path.basename(self.exec_path) == userargs[0]): + return True + return False + + def get_command(self, userargs): + """Returns command to execute (with sudo -u if run_as != root).""" + if (self.run_as != 'root'): + # Used to run commands at lesser privileges + return ['sudo', '-u', self.run_as, self.exec_path] + userargs[1:] + return [self.exec_path] + userargs[1:] + + def get_environment(self, userargs): + """Returns specific environment to set, None if none""" + return None + + +class RegExpFilter(CommandFilter): + """Command filter doing regexp matching for every argument""" + + def match(self, userargs): + # Early skip if command or number of args don't match + if (len(self.args) != len(userargs)): + # DENY: argument numbers don't match + return False + # Compare each arg (anchoring pattern explicitly at end of string) + for (pattern, arg) in zip(self.args, userargs): + try: + if not re.match(pattern + '$', arg): + break + except re.error: + # DENY: Badly-formed filter + return False + else: + # ALLOW: All arguments matched + return True + + # DENY: Some arguments did not match + return False + + +class DnsmasqFilter(CommandFilter): + """Specific filter for the dnsmasq call (which includes env)""" + + def match(self, userargs): + if (userargs[0].startswith("FLAGFILE=") and + userargs[1].startswith("NETWORK_ID=") and + userargs[2] == "dnsmasq"): + return True + return False + + def get_command(self, userargs): + return [self.exec_path] + userargs[3:] + + def get_environment(self, userargs): + env = os.environ.copy() + env['FLAGFILE'] = userargs[0].split('=')[-1] + env['NETWORK_ID'] = userargs[1].split('=')[-1] + return env + + +class KillFilter(CommandFilter): + """Specific filter for the kill calls. + 1st argument is a list of accepted signals (emptystring means no signal) + 2nd argument is a list of accepted affected executables. + + This filter relies on /proc to accurately determine affected + executable, so it will only work on procfs-capable systems (not OSX). 
+ """ + + def match(self, userargs): + if userargs[0] != "kill": + return False + args = list(userargs) + if len(args) == 3: + signal = args.pop(1) + if signal not in self.args[0]: + # Requested signal not in accepted list + return False + else: + if len(args) != 2: + # Incorrect number of arguments + return False + if '' not in self.args[0]: + # No signal, but list doesn't include empty string + return False + try: + command = os.readlink("/proc/%d/exe" % int(args[1])) + # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on + # the end if an executable is updated or deleted + if command.endswith(" (deleted)"): + command = command[:command.rindex(" ")] + if command not in self.args[1]: + # Affected executable not in accepted list + return False + except (ValueError, OSError): + # Incorrect PID + return False + return True + + +class ReadFileFilter(CommandFilter): + """Specific filter for the utils.read_file_as_root call""" + + def __init__(self, file_path, *args): + self.file_path = file_path + super(ReadFileFilter, self).__init__("/bin/cat", "root", *args) + + def match(self, userargs): + if userargs[0] != 'cat': + return False + if userargs[1] != self.file_path: + return False + if len(userargs) != 2: + return False + return True diff --git a/cinder/rootwrap/volume.py b/cinder/rootwrap/volume.py new file mode 100755 index 00000000000..8d96f4600cf --- /dev/null +++ b/cinder/rootwrap/volume.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.rootwrap import filters + +filterlist = [ + # cinder/volume/iscsi.py: iscsi_helper '--op' ... + filters.CommandFilter("/usr/sbin/ietadm", "root"), + filters.CommandFilter("/usr/sbin/tgtadm", "root"), + + # cinder/volume/driver.py: 'vgs', '--noheadings', '-o', 'name' + filters.CommandFilter("/sbin/vgs", "root"), + + # cinder/volume/driver.py: 'lvcreate', '-L', sizestr, '-n', volume_name,.. + # cinder/volume/driver.py: 'lvcreate', '-L', ... + filters.CommandFilter("/sbin/lvcreate", "root"), + + # cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,... + filters.CommandFilter("/bin/dd", "root"), + + # cinder/volume/driver.py: 'lvremove', '-f', "%s/%s" % ... + filters.CommandFilter("/sbin/lvremove", "root"), + + # cinder/volume/driver.py: 'lvdisplay','--noheading','-C','-o','Attr',.. + filters.CommandFilter("/sbin/lvdisplay", "root"), + + # cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',... + # cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ... + filters.CommandFilter("/sbin/iscsiadm", "root"), + ] diff --git a/cinder/rootwrap/wrapper.py b/cinder/rootwrap/wrapper.py new file mode 100755 index 00000000000..683224e31fa --- /dev/null +++ b/cinder/rootwrap/wrapper.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import os +import sys + + +FILTERS_MODULES = ['cinder.rootwrap.volume'] + + +def load_filters(): + """Load filters from modules present in cinder.rootwrap.""" + filters = [] + for modulename in FILTERS_MODULES: + try: + __import__(modulename) + module = sys.modules[modulename] + filters = filters + module.filterlist + except ImportError: + # It's OK to have missing filters, since filter modules are + # shipped with specific nodes rather than with python-cinder + pass + return filters + + +def match_filter(filters, userargs): + """ + Checks user command and arguments through command filters and + returns the first matching filter, or None is none matched. + """ + + found_filter = None + + for f in filters: + if f.match(userargs): + # Try other filters if executable is absent + if not os.access(f.exec_path, os.X_OK): + if not found_filter: + found_filter = f + continue + # Otherwise return matching filter for execution + return f + + # No filter matched or first missing executable + return found_filter diff --git a/cinder/rpc/__init__.py b/cinder/rpc/__init__.py new file mode 100644 index 00000000000..0340116f6fb --- /dev/null +++ b/cinder/rpc/__init__.py @@ -0,0 +1,227 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.openstack.common import cfg +from cinder.openstack.common import importutils + + +rpc_opts = [ + cfg.StrOpt('rpc_backend', + default='cinder.rpc.impl_kombu', + help="The messaging module to use, defaults to kombu."), + cfg.IntOpt('rpc_thread_pool_size', + default=64, + help='Size of RPC thread pool'), + cfg.IntOpt('rpc_conn_pool_size', + default=30, + help='Size of RPC connection pool'), + cfg.IntOpt('rpc_response_timeout', + default=60, + help='Seconds to wait for a response from call or multicall'), + cfg.IntOpt('allowed_rpc_exception_modules', + default=['cinder.exception'], + help='Modules of exceptions that are permitted to be recreated' + 'upon receiving exception data from an rpc call.'), + ] + +_CONF = None + + +def register_opts(conf): + global _CONF + _CONF = conf + _CONF.register_opts(rpc_opts) + _get_impl().register_opts(_CONF) + + +def create_connection(new=True): + """Create a connection to the message bus used for rpc. 
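
Before moving on through the rpc API, a sketch of how the rootwrap pieces above fit together: load_filters() collects the per-node filter lists and match_filter() picks the first filter whose match() accepts the command. The command line here is invented:

    from cinder.rootwrap import filters, wrapper

    filter_list = [
        filters.CommandFilter("/sbin/lvcreate", "root"),
        filters.CommandFilter("/bin/dd", "root"),
    ]

    userargs = ['dd', 'if=/dev/zero', 'of=/dev/vg0/vol-0001', 'count=1']
    match = wrapper.match_filter(filter_list, userargs)
    if match:
        # ['/bin/dd', 'if=/dev/zero', 'of=/dev/vg0/vol-0001', 'count=1']
        print(match.get_command(userargs))
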
+ + For some example usage of creating a connection and some consumers on that + connection, see cinder.service. + + :param new: Whether or not to create a new connection. A new connection + will be created by default. If new is False, the + implementation is free to return an existing connection from a + pool. + + :returns: An instance of cinder.rpc.common.Connection + """ + return _get_impl().create_connection(_CONF, new=new) + + +def call(context, topic, msg, timeout=None): + """Invoke a remote method that returns something. + + :param context: Information that identifies the user that has made this + request. + :param topic: The topic to send the rpc message to. This correlates to the + topic argument of + cinder.rpc.common.Connection.create_consumer() + and only applies when the consumer was created + with fanout=False. + :param msg: This is a dict in the form { "method" : "method_to_invoke", + "args" : dict_of_kwargs } + :param timeout: int, number of seconds to use for a response timeout. + If set, this overrides the rpc_response_timeout option. + + :returns: A dict from the remote method. + + :raises: cinder.rpc.common.Timeout if a complete response is not received + before the timeout is reached. + """ + return _get_impl().call(_CONF, context, topic, msg, timeout) + + +def cast(context, topic, msg): + """Invoke a remote method that does not return anything. + + :param context: Information that identifies the user that has made this + request. + :param topic: The topic to send the rpc message to. This correlates to the + topic argument of + cinder.rpc.common.Connection.create_consumer() + and only applies when the consumer was created + with fanout=False. + :param msg: This is a dict in the form { "method" : "method_to_invoke", + "args" : dict_of_kwargs } + + :returns: None + """ + return _get_impl().cast(_CONF, context, topic, msg) + + +def fanout_cast(context, topic, msg): + """Broadcast a remote method invocation with no return. + + This method will get invoked on all consumers that were set up with this + topic name and fanout=True. + + :param context: Information that identifies the user that has made this + request. + :param topic: The topic to send the rpc message to. This correlates to the + topic argument of + cinder.rpc.common.Connection.create_consumer() + and only applies when the consumer was created + with fanout=True. + :param msg: This is a dict in the form { "method" : "method_to_invoke", + "args" : dict_of_kwargs } + + :returns: None + """ + return _get_impl().fanout_cast(_CONF, context, topic, msg) + + +def multicall(context, topic, msg, timeout=None): + """Invoke a remote method and get back an iterator. + + In this case, the remote method will be returning multiple values in + separate messages, so the return values can be processed as the come in via + an iterator. + + :param context: Information that identifies the user that has made this + request. + :param topic: The topic to send the rpc message to. This correlates to the + topic argument of + cinder.rpc.common.Connection.create_consumer() + and only applies when the consumer was created + with fanout=False. + :param msg: This is a dict in the form { "method" : "method_to_invoke", + "args" : dict_of_kwargs } + :param timeout: int, number of seconds to use for a response timeout. + If set, this overrides the rpc_response_timeout option. + + :returns: An iterator. 
+              The iterator will yield a tuple (N, X) where N is an index that
+              starts at 0 and increases by one for each value returned and X
+              is the Nth value that was returned by the remote method.
+
+    :raises: cinder.rpc.common.Timeout if a complete response is not received
+             before the timeout is reached.
+    """
+    return _get_impl().multicall(_CONF, context, topic, msg, timeout)
+
+
+def notify(context, topic, msg):
+    """Send notification event.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict of content of event.
+
+    :returns: None
+    """
+    return _get_impl().notify(_CONF, context, topic, msg)
+
+
+def cleanup():
+    """Clean up resources in use by implementation.
+
+    Clean up any resources that have been allocated by the RPC implementation.
+    This is typically open connections to a messaging service. This function
+    would get called before an application using this API exits to allow
+    connections to get torn down cleanly.
+
+    :returns: None
+    """
+    return _get_impl().cleanup()
+
+
+def cast_to_server(context, server_params, topic, msg):
+    """Invoke a remote method that does not return anything.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param server_params: Connection information
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().cast_to_server(_CONF, context, server_params, topic,
+                                      msg)
+
+
+def fanout_cast_to_server(context, server_params, topic, msg):
+    """Broadcast a remote method invocation with no return.
+
+    :param context: Information that identifies the user that has made this
+                    request.
+    :param server_params: Connection information
+    :param topic: The topic to send the notification to.
+    :param msg: This is a dict in the form { "method" : "method_to_invoke",
+                "args" : dict_of_kwargs }
+
+    :returns: None
+    """
+    return _get_impl().fanout_cast_to_server(_CONF, context, server_params,
+                                             topic, msg)
+
+
+_RPCIMPL = None
+
+
+def _get_impl():
+    """Delay import of rpc_backend until configuration is loaded."""
+    global _RPCIMPL
+    if _RPCIMPL is None:
+        _RPCIMPL = importutils.import_module(_CONF.rpc_backend)
+    return _RPCIMPL
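
The module-level helpers above are typically exercised as in this sketch; the topic, method name and arguments are invented, and a reachable message broker plus a prior register_opts() call are assumed:

    from cinder import context, flags, rpc

    rpc.register_opts(flags.FLAGS)   # wire the rpc opts into the global config

    ctxt = context.RequestContext(user_id='u1', project_id='p1')

    # Blocking round trip: waits up to rpc_response_timeout seconds
    result = rpc.call(ctxt, 'volume.host1',
                      {'method': 'create_volume',
                       'args': {'volume_id': 'vol-0001'}})

    # Fire and forget: returns immediately, no reply is routed back
    rpc.cast(ctxt, 'volume.host1',
             {'method': 'delete_volume',
              'args': {'volume_id': 'vol-0001'}})
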
diff --git a/cinder/rpc/amqp.py b/cinder/rpc/amqp.py
new file mode 100644
index 00000000000..b559ca78d1f
--- /dev/null
+++ b/cinder/rpc/amqp.py
@@ -0,0 +1,405 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2011 - 2012, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Shared code between AMQP based cinder.rpc implementations.
+
+The code in this module is shared between the rpc implementations based on
+AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
+uses AMQP, but is deprecated and predates this code.
+"""
+
+import inspect
+import sys
+import uuid
+
+from eventlet import greenpool
+from eventlet import pools
+from eventlet import semaphore
+
+from cinder import context
+from cinder import exception
+from cinder import log as logging
+from cinder.openstack.common import local
+import cinder.rpc.common as rpc_common
+from cinder import utils
+
+LOG = logging.getLogger(__name__)
+
+
+class Pool(pools.Pool):
+    """Class that implements a Pool of Connections."""
+    def __init__(self, conf, connection_cls, *args, **kwargs):
+        self.connection_cls = connection_cls
+        self.conf = conf
+        kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
+        kwargs.setdefault("order_as_stack", True)
+        super(Pool, self).__init__(*args, **kwargs)
+
+    # TODO(comstud): Timeout connections not used in a while
+    def create(self):
+        LOG.debug('Pool creating new connection')
+        return self.connection_cls(self.conf)
+
+    def empty(self):
+        while self.free_items:
+            self.get().close()
+
+
+_pool_create_sem = semaphore.Semaphore()
+
+
+def get_connection_pool(conf, connection_cls):
+    with _pool_create_sem:
+        # Make sure only one thread tries to create the connection pool.
+        if not connection_cls.pool:
+            connection_cls.pool = Pool(conf, connection_cls)
+    return connection_cls.pool
+
+
+class ConnectionContext(rpc_common.Connection):
+    """The class that is actually returned to the caller of
+    create_connection(). This is essentially a wrapper around
+    Connection that supports 'with' and can return a new Connection or
+    one from a pool. It will also catch when an instance of this class
+    is to be deleted so that we can return Connections to the pool on
+    exceptions and so forth without making the caller be responsible for
+    catching all exceptions and making sure to return a connection to
+    the pool.
+    """
+
+    def __init__(self, conf, connection_pool, pooled=True, server_params=None):
+        """Create a new connection, or get one from the pool"""
+        self.connection = None
+        self.conf = conf
+        self.connection_pool = connection_pool
+        if pooled:
+            self.connection = connection_pool.get()
+        else:
+            self.connection = connection_pool.connection_cls(conf,
+                    server_params=server_params)
+        self.pooled = pooled
+
+    def __enter__(self):
+        """When with ConnectionContext() is used, return self"""
+        return self
+
+    def _done(self):
+        """If the connection came from a pool, clean it up and put it back.
+        If it did not come from a pool, close it.
+        """
+        if self.connection:
+            if self.pooled:
+                # Reset the connection so it's ready for the next caller
+                # to grab from the pool
+                self.connection.reset()
+                self.connection_pool.put(self.connection)
+            else:
+                try:
+                    self.connection.close()
+                except Exception:
+                    pass
+            self.connection = None
+
+    def __exit__(self, exc_type, exc_value, tb):
+        """End of 'with' statement. We're done here."""
+        self._done()
+
+    def __del__(self):
+        """Caller is done with this connection.
Make sure we cleaned up.""" + self._done() + + def close(self): + """Caller is done with this connection.""" + self._done() + + def create_consumer(self, topic, proxy, fanout=False): + self.connection.create_consumer(topic, proxy, fanout) + + def consume_in_thread(self): + self.connection.consume_in_thread() + + def __getattr__(self, key): + """Proxy all other calls to the Connection instance""" + if self.connection: + return getattr(self.connection, key) + else: + raise exception.InvalidRPCConnectionReuse() + + +def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None, + ending=False): + """Sends a reply or an error on the channel signified by msg_id. + + Failure should be a sys.exc_info() tuple. + + """ + with ConnectionContext(conf, connection_pool) as conn: + if failure: + failure = rpc_common.serialize_remote_exception(failure) + + try: + msg = {'result': reply, 'failure': failure} + except TypeError: + msg = {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure} + if ending: + msg['ending'] = True + conn.direct_send(msg_id, msg) + + +class RpcContext(context.RequestContext): + """Context that supports replying to a rpc.call""" + def __init__(self, *args, **kwargs): + self.msg_id = kwargs.pop('msg_id', None) + self.conf = kwargs.pop('conf') + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, reply=None, failure=None, ending=False, + connection_pool=None): + if self.msg_id: + msg_reply(self.conf, self.msg_id, connection_pool, reply, failure, + ending) + if ending: + self.msg_id = None + + +def unpack_context(conf, msg): + """Unpack context from msg.""" + context_dict = {} + for key in list(msg.keys()): + # NOTE(vish): Some versions of python don't like unicode keys + # in kwargs. + key = str(key) + if key.startswith('_context_'): + value = msg.pop(key) + context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) + context_dict['conf'] = conf + ctx = RpcContext.from_dict(context_dict) + rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict()) + return ctx + + +def pack_context(msg, context): + """Pack context into msg. + + Values for message keys need to be less than 255 chars, so we pull + context out into a bunch of separate keys. If we want to support + more arguments in rabbit messages, we may want to do the same + for args at some point. + + """ + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) + + +class ProxyCallback(object): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, conf, proxy, connection_pool): + self.proxy = proxy + self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size) + self.connection_pool = connection_pool + self.conf = conf + + def __call__(self, message_data): + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. 
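
The context packing described above can be exercised without a broker; a small sketch with invented names:

    from cinder import context
    from cinder.rpc import amqp

    ctxt = context.RequestContext(user_id='u1', project_id='p1')
    msg = {'method': 'echo', 'args': {'value': 42}}

    # Flatten the request context into _context_* keys alongside the call
    # payload; unpack_context() on the consumer side reverses this.
    amqp.pack_context(msg, ctxt)
    print(sorted(k for k in msg if k.startswith('_context_')))
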
+ + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ + # It is important to clear the context here, because at this point + # the previous context is stored in local.store.context + if hasattr(local.store, 'context'): + del local.store.context + rpc_common._safe_log(LOG.debug, _('received %s'), message_data) + ctxt = unpack_context(self.conf, message_data) + method = message_data.get('method') + args = message_data.get('args', {}) + if not method: + LOG.warn(_('no method for message: %s') % message_data) + ctxt.reply(_('No method for message: %s') % message_data, + connection_pool=self.connection_pool) + return + self.pool.spawn_n(self._process_data, ctxt, method, args) + + @exception.wrap_exception() + def _process_data(self, ctxt, method, args): + """Thread that magically looks for a method on the proxy + object and calls it. + """ + ctxt.update_store() + try: + node_func = getattr(self.proxy, str(method)) + node_args = dict((str(k), v) for k, v in args.iteritems()) + # NOTE(vish): magic is fun! + rval = node_func(context=ctxt, **node_args) + # Check if the result was a generator + if inspect.isgenerator(rval): + for x in rval: + ctxt.reply(x, None, connection_pool=self.connection_pool) + else: + ctxt.reply(rval, None, connection_pool=self.connection_pool) + # This final None tells multicall that it is done. + ctxt.reply(ending=True, connection_pool=self.connection_pool) + except Exception as e: + LOG.exception('Exception during message handling') + ctxt.reply(None, sys.exc_info(), + connection_pool=self.connection_pool) + + +class MulticallWaiter(object): + def __init__(self, conf, connection, timeout): + self._connection = connection + self._iterator = connection.iterconsume( + timeout=timeout or conf.rpc_response_timeout) + self._result = None + self._done = False + self._got_ending = False + self._conf = conf + + def done(self): + if self._done: + return + self._done = True + self._iterator.close() + self._iterator = None + self._connection.close() + + def __call__(self, data): + """The consume() callback will call this. Store the result.""" + if data['failure']: + failure = data['failure'] + self._result = rpc_common.deserialize_remote_exception(self._conf, + failure) + + elif data.get('ending', False): + self._got_ending = True + else: + self._result = data['result'] + + def __iter__(self): + """Return a result until we get a 'None' response from consumer""" + if self._done: + raise StopIteration + while True: + try: + self._iterator.next() + except Exception: + with utils.save_and_reraise_exception(): + self.done() + if self._got_ending: + self.done() + raise StopIteration + result = self._result + if isinstance(result, Exception): + self.done() + raise result + yield result + + +def create_connection(conf, new, connection_pool): + """Create a connection""" + return ConnectionContext(conf, connection_pool, pooled=not new) + + +def multicall(conf, context, topic, msg, timeout, connection_pool): + """Make a call that returns multiple times.""" + # Can't use 'with' for multicall, as it returns an iterator + # that will continue to use the connection. 
When it's done, + # connection.close() will get called which will put it back into + # the pool + LOG.debug(_('Making asynchronous call on %s ...'), topic) + msg_id = uuid.uuid4().hex + msg.update({'_msg_id': msg_id}) + LOG.debug(_('MSG_ID is %s') % (msg_id)) + pack_context(msg, context) + + conn = ConnectionContext(conf, connection_pool) + wait_msg = MulticallWaiter(conf, conn, timeout) + conn.declare_direct_consumer(msg_id, wait_msg) + conn.topic_send(topic, msg) + return wait_msg + + +def call(conf, context, topic, msg, timeout, connection_pool): + """Sends a message on a topic and wait for a response.""" + rv = multicall(conf, context, topic, msg, timeout, connection_pool) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(conf, context, topic, msg, connection_pool): + """Sends a message on a topic without waiting for a response.""" + LOG.debug(_('Making asynchronous cast on %s...'), topic) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool) as conn: + conn.topic_send(topic, msg) + + +def fanout_cast(conf, context, topic, msg, connection_pool): + """Sends a message on a fanout exchange without waiting for a response.""" + LOG.debug(_('Making asynchronous fanout cast...')) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool) as conn: + conn.fanout_send(topic, msg) + + +def cast_to_server(conf, context, server_params, topic, msg, connection_pool): + """Sends a message on a topic to a specific server.""" + pack_context(msg, context) + with ConnectionContext(conf, connection_pool, pooled=False, + server_params=server_params) as conn: + conn.topic_send(topic, msg) + + +def fanout_cast_to_server(conf, context, server_params, topic, msg, + connection_pool): + """Sends a message on a fanout exchange to a specific server.""" + pack_context(msg, context) + with ConnectionContext(conf, connection_pool, pooled=False, + server_params=server_params) as conn: + conn.fanout_send(topic, msg) + + +def notify(conf, context, topic, msg, connection_pool): + """Sends a notification event on a topic.""" + event_type = msg.get('event_type') + LOG.debug(_('Sending %(event_type)s on %(topic)s'), locals()) + pack_context(msg, context) + with ConnectionContext(conf, connection_pool) as conn: + conn.notify_send(topic, msg) + + +def cleanup(connection_pool): + if connection_pool: + connection_pool.empty() diff --git a/cinder/rpc/common.py b/cinder/rpc/common.py new file mode 100644 index 00000000000..4dfaa3f76a3 --- /dev/null +++ b/cinder/rpc/common.py @@ -0,0 +1,220 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
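
To make the request/response flow above concrete, here is a rough sketch of the message that multicall() assembles before publishing; the method name and argument are illustrative only and appear nowhere in the patch.

    # Illustrative request message, as assembled by multicall() above.
    msg = {'method': 'create_volume', 'args': {'size': 1}}
    msg['_msg_id'] = uuid.uuid4().hex   # reply routing key for direct_send()
    pack_context(msg, context)          # flattens context into _context_* keys
    # topic_send() publishes the request; MulticallWaiter consumes replies
    # from the _msg_id queue until one carries 'ending': True, and call()
    # returns the last 'result' received.
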
+
+import copy
+import sys
+import traceback
+
+from cinder import exception
+from cinder import log as logging
+from cinder.openstack.common import cfg
+from cinder.openstack.common import importutils
+from cinder import utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class RemoteError(exception.CinderException):
+    """Signifies that a remote class has raised an exception.
+
+    Contains a string representation of the type of the original exception,
+    the value of the original exception, and the traceback.  These are
+    sent to the parent as a joined string so printing the exception
+    contains all of the relevant info.
+
+    """
+    message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
+
+    def __init__(self, exc_type=None, value=None, traceback=None):
+        self.exc_type = exc_type
+        self.value = value
+        self.traceback = traceback
+        super(RemoteError, self).__init__(exc_type=exc_type,
+                                          value=value,
+                                          traceback=traceback)
+
+
+class Timeout(exception.CinderException):
+    """Signifies that a timeout has occurred.
+
+    This exception is raised if the rpc_response_timeout is reached while
+    waiting for a response from the remote side.
+    """
+    message = _("Timeout while waiting on RPC response.")
+
+
+class Connection(object):
+    """A connection, returned by rpc.create_connection().
+
+    This class represents a connection to the message bus used for rpc.
+    An instance of this class should never be created by users of the rpc API.
+    Use rpc.create_connection() instead.
+    """
+    def close(self):
+        """Close the connection.
+
+        This method must be called when the connection will no longer be used.
+        It will ensure that any resources associated with the connection, such
+        as a network connection, are cleaned up.
+        """
+        raise NotImplementedError()
+
+    def create_consumer(self, conf, topic, proxy, fanout=False):
+        """Create a consumer on this connection.
+
+        A consumer is associated with a message queue on the backend message
+        bus.  The consumer will read messages from the queue, unpack them, and
+        dispatch them to the proxy object.  The contents of the message pulled
+        off of the queue will determine which method gets called on the proxy
+        object.
+
+        :param conf: An openstack.common.cfg configuration object.
+        :param topic: This is a name associated with what to consume from.
+                      Multiple instances of a service may consume from the
+                      same topic. For example, all instances of cinder-compute
+                      consume from a queue called "compute".  In that case,
+                      the messages will get distributed amongst the consumers
+                      in a round-robin fashion if fanout=False.  If
+                      fanout=True, every consumer associated with this topic
+                      will get a copy of every message.
+        :param proxy: The object that will handle all incoming messages.
+        :param fanout: Whether or not this is a fanout topic.  See the
+                       documentation for the topic parameter for some
+                       additional comments on this.
+        """
+        raise NotImplementedError()
+
+    def consume_in_thread(self):
+        """Spawn a thread to handle incoming messages.
+
+        Spawn a thread that will be responsible for handling all incoming
+        messages for consumers that were set up on this connection.
+
+        Message dispatching inside of this is expected to be implemented in a
+        non-blocking manner.  An example implementation would be having this
+        thread pull messages in for all of the consumers, but utilize a thread
+        pool for dispatching the messages to the proxy objects.
+ """ + raise NotImplementedError() + + +def _safe_log(log_func, msg, msg_data): + """Sanitizes the msg_data field before logging.""" + SANITIZE = { + 'set_admin_password': ('new_pass',), + 'run_instance': ('admin_password',), + } + + has_method = 'method' in msg_data and msg_data['method'] in SANITIZE + has_context_token = '_context_auth_token' in msg_data + has_token = 'auth_token' in msg_data + + if not any([has_method, has_context_token, has_token]): + return log_func(msg, msg_data) + + msg_data = copy.deepcopy(msg_data) + + if has_method: + method = msg_data['method'] + if method in SANITIZE: + args_to_sanitize = SANITIZE[method] + for arg in args_to_sanitize: + try: + msg_data['args'][arg] = "" + except KeyError: + pass + + if has_context_token: + msg_data['_context_auth_token'] = '' + + if has_token: + msg_data['auth_token'] = '' + + return log_func(msg, msg_data) + + +def serialize_remote_exception(failure_info): + """Prepares exception data to be sent over rpc. + + Failure_info should be a sys.exc_info() tuple. + + """ + tb = traceback.format_exception(*failure_info) + failure = failure_info[1] + LOG.error(_("Returning exception %s to caller"), unicode(failure)) + LOG.error(tb) + + kwargs = {} + if hasattr(failure, 'kwargs'): + kwargs = failure.kwargs + + data = { + 'class': str(failure.__class__.__name__), + 'module': str(failure.__class__.__module__), + 'message': unicode(failure), + 'tb': tb, + 'args': failure.args, + 'kwargs': kwargs + } + + json_data = utils.dumps(data) + + return json_data + + +def deserialize_remote_exception(conf, data): + failure = utils.loads(str(data)) + + trace = failure.get('tb', []) + message = failure.get('message', "") + "\n" + "\n".join(trace) + name = failure.get('class') + module = failure.get('module') + + # NOTE(ameade): We DO NOT want to allow just any module to be imported, in + # order to prevent arbitrary code execution. + if not module in conf.allowed_rpc_exception_modules: + return RemoteError(name, failure.get('message'), trace) + + try: + mod = importutils.import_module(module) + klass = getattr(mod, name) + if not issubclass(klass, Exception): + raise TypeError("Can only deserialize Exceptions") + + failure = klass(**failure.get('kwargs', {})) + except (AttributeError, TypeError, ImportError): + return RemoteError(name, failure.get('message'), trace) + + ex_type = type(failure) + str_override = lambda self: message + new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,), + {'__str__': str_override}) + try: + # NOTE(ameade): Dynamically create a new exception type and swap it in + # as the new type for the exception. This only works on user defined + # Exceptions and not core python exceptions. This is important because + # we cannot necessarily change an exception message so we must override + # the __str__ method. + failure.__class__ = new_ex_type + except TypeError as e: + # NOTE(ameade): If a core exception then just add the traceback to the + # first exception argument. + failure.args = (message,) + failure.args[1:] + return failure diff --git a/cinder/rpc/impl_fake.py b/cinder/rpc/impl_fake.py new file mode 100644 index 00000000000..dc3b00ecaa5 --- /dev/null +++ b/cinder/rpc/impl_fake.py @@ -0,0 +1,185 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Fake RPC implementation which calls proxy methods directly with no +queues. Casts will block, but this is very useful for tests. +""" + +import inspect +import json +import signal +import sys +import time +import traceback + +import eventlet + +from cinder import context +from cinder.rpc import common as rpc_common + +CONSUMERS = {} + + +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + super(RpcContext, self).__init__(*args, **kwargs) + self._response = [] + self._done = False + + def reply(self, reply=None, failure=None, ending=False): + if ending: + self._done = True + if not self._done: + self._response.append((reply, failure)) + + +class Consumer(object): + def __init__(self, topic, proxy): + self.topic = topic + self.proxy = proxy + + def call(self, context, method, args, timeout): + node_func = getattr(self.proxy, method) + node_args = dict((str(k), v) for k, v in args.iteritems()) + done = eventlet.event.Event() + + def _inner(): + ctxt = RpcContext.from_dict(context.to_dict()) + try: + rval = node_func(context=ctxt, **node_args) + res = [] + # Caller might have called ctxt.reply() manually + for (reply, failure) in ctxt._response: + if failure: + raise failure[0], failure[1], failure[2] + res.append(reply) + # if ending not 'sent'...we might have more data to + # return from the function itself + if not ctxt._done: + if inspect.isgenerator(rval): + for val in rval: + res.append(val) + else: + res.append(rval) + done.send(res) + except Exception as e: + done.send_exception(e) + + thread = eventlet.greenthread.spawn(_inner) + + if timeout: + start_time = time.time() + while not done.ready(): + eventlet.greenthread.sleep(1) + cur_time = time.time() + if (cur_time - start_time) > timeout: + thread.kill() + raise rpc_common.Timeout() + + return done.wait() + + +class Connection(object): + """Connection object.""" + + def __init__(self): + self.consumers = [] + + def create_consumer(self, topic, proxy, fanout=False): + consumer = Consumer(topic, proxy) + self.consumers.append(consumer) + if topic not in CONSUMERS: + CONSUMERS[topic] = [] + CONSUMERS[topic].append(consumer) + + def close(self): + for consumer in self.consumers: + CONSUMERS[consumer.topic].remove(consumer) + self.consumers = [] + + def consume_in_thread(self): + pass + + +def create_connection(conf, new=True): + """Create a connection""" + return Connection() + + +def check_serialize(msg): + """Make sure a message intended for rpc can be serialized.""" + json.dumps(msg) + + +def multicall(conf, context, topic, msg, timeout=None): + """Make a call that returns multiple times.""" + + check_serialize(msg) + + method = msg.get('method') + if not method: + return + args = msg.get('args', {}) + + try: + consumer = CONSUMERS[topic][0] + except (KeyError, IndexError): + return iter([None]) + else: + return consumer.call(context, method, args, timeout) + + +def call(conf, context, topic, msg, timeout=None): + """Sends a message on a topic and wait for a response.""" + rv = multicall(conf, context, topic, msg, timeout) + # NOTE(vish): return the last result from the multicall + rv = 
list(rv)
+    if not rv:
+        return
+    return rv[-1]
+
+
+def cast(conf, context, topic, msg):
+    try:
+        call(conf, context, topic, msg)
+    except Exception:
+        pass
+
+
+def notify(conf, context, topic, msg):
+    check_serialize(msg)
+
+
+def cleanup():
+    pass
+
+
+def fanout_cast(conf, context, topic, msg):
+    """Cast to all consumers of a topic"""
+    check_serialize(msg)
+    method = msg.get('method')
+    if not method:
+        return
+    args = msg.get('args', {})
+
+    for consumer in CONSUMERS.get(topic, []):
+        try:
+            consumer.call(context, method, args, None)
+        except Exception:
+            pass
+
+
+def register_opts(conf):
+    pass
diff --git a/cinder/rpc/impl_kombu.py b/cinder/rpc/impl_kombu.py
new file mode 100644
index 00000000000..c64356cf860
--- /dev/null
+++ b/cinder/rpc/impl_kombu.py
@@ -0,0 +1,713 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.  You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+import socket
+import ssl
+import sys
+import time
+import uuid
+
+import eventlet
+import greenlet
+import kombu
+import kombu.entity
+import kombu.messaging
+import kombu.connection
+
+from cinder.openstack.common import cfg
+from cinder.rpc import amqp as rpc_amqp
+from cinder.rpc import common as rpc_common
+
+kombu_opts = [
+    cfg.StrOpt('kombu_ssl_version',
+               default='',
+               help='SSL version to use (valid only if SSL enabled)'),
+    cfg.StrOpt('kombu_ssl_keyfile',
+               default='',
+               help='SSL key file (valid only if SSL enabled)'),
+    cfg.StrOpt('kombu_ssl_certfile',
+               default='',
+               help='SSL cert file (valid only if SSL enabled)'),
+    cfg.StrOpt('kombu_ssl_ca_certs',
+               default='',
+               help=('SSL certification authority file '
+                     '(valid only if SSL enabled)')),
+    ]
+
+LOG = rpc_common.LOG
+
+
+class ConsumerBase(object):
+    """Consumer base class."""
+
+    def __init__(self, channel, callback, tag, **kwargs):
+        """Declare a queue on an amqp channel.
+
+        'channel' is the amqp channel to use
+        'callback' is the callback to call when messages are received
+        'tag' is a unique ID for the consumer on the channel
+
+        queue name, exchange name, and other kombu options are
+        passed in here as a dictionary.
+        """
+        self.callback = callback
+        self.tag = str(tag)
+        self.kwargs = kwargs
+        self.queue = None
+        self.reconnect(channel)
+
+    def reconnect(self, channel):
+        """Re-declare the queue after a rabbit reconnect"""
+        self.channel = channel
+        self.kwargs['channel'] = channel
+        self.queue = kombu.entity.Queue(**self.kwargs)
+        self.queue.declare()
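+
+    # NOTE(editorial sketch): for a topic consumer with default options,
+    # the declaration performed by reconnect() above amounts roughly to
+    # the following (exchange and queue names are illustrative):
+    #
+    #     exchange = kombu.entity.Exchange(name=conf.control_exchange,
+    #                                      type='topic', durable=False,
+    #                                      auto_delete=False)
+    #     queue = kombu.entity.Queue(name='volume', exchange=exchange,
+    #                                routing_key='volume', channel=channel)
+    #     queue.declare()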
+    def consume(self, *args, **kwargs):
+        """Actually declare the consumer on the amqp channel.  This will
+        start the flow of messages from the queue.  Using the
+        Connection.iterconsume() iterator will process the messages,
+        calling the appropriate callback.
+
+        If a callback is specified in kwargs, use that.  Otherwise,
+        use the callback passed during __init__()
+
+        If kwargs['nowait'] is False, this call will wait for the broker
+        to confirm the consumer before returning.
+
+        Messages will automatically be acked if the callback doesn't
+        raise an exception
+        """
+
+        options = {'consumer_tag': self.tag}
+        options['nowait'] = kwargs.get('nowait', False)
+        callback = kwargs.get('callback', self.callback)
+        if not callback:
+            raise ValueError("No callback defined")
+
+        def _callback(raw_message):
+            message = self.channel.message_to_python(raw_message)
+            try:
+                callback(message.payload)
+                message.ack()
+            except Exception:
+                LOG.exception(_("Failed to process message... skipping it."))
+
+        self.queue.consume(*args, callback=_callback, **options)
+
+    def cancel(self):
+        """Cancel the consuming from the queue, if it has started"""
+        try:
+            self.queue.cancel(self.tag)
+        except KeyError, e:
+            # NOTE(comstud): Kludge to get around an amqplib bug
+            if str(e) != "u'%s'" % self.tag:
+                raise
+        self.queue = None
+
+
+class DirectConsumer(ConsumerBase):
+    """Queue/consumer class for 'direct'"""
+
+    def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
+        """Init a 'direct' queue.
+
+        'channel' is the amqp channel to use
+        'msg_id' is the msg_id to listen on
+        'callback' is the callback to call when messages are received
+        'tag' is a unique ID for the consumer on the channel
+
+        Other kombu options may be passed
+        """
+        # Default options
+        options = {'durable': False,
+                   'auto_delete': True,
+                   'exclusive': True}
+        options.update(kwargs)
+        exchange = kombu.entity.Exchange(
+                name=msg_id,
+                type='direct',
+                durable=options['durable'],
+                auto_delete=options['auto_delete'])
+        super(DirectConsumer, self).__init__(
+                channel,
+                callback,
+                tag,
+                name=msg_id,
+                exchange=exchange,
+                routing_key=msg_id,
+                **options)
+
+
+class TopicConsumer(ConsumerBase):
+    """Consumer class for 'topic'"""
+
+    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
+        """Init a 'topic' queue.
+
+        'channel' is the amqp channel to use
+        'topic' is the topic to listen on
+        'callback' is the callback to call when messages are received
+        'tag' is a unique ID for the consumer on the channel
+
+        Other kombu options may be passed
+        """
+        # Default options
+        options = {'durable': conf.rabbit_durable_queues,
+                   'auto_delete': False,
+                   'exclusive': False}
+        options.update(kwargs)
+        exchange = kombu.entity.Exchange(
+                name=conf.control_exchange,
+                type='topic',
+                durable=options['durable'],
+                auto_delete=options['auto_delete'])
+        super(TopicConsumer, self).__init__(
+                channel,
+                callback,
+                tag,
+                name=topic,
+                exchange=exchange,
+                routing_key=topic,
+                **options)
+
+
+class FanoutConsumer(ConsumerBase):
+    """Consumer class for 'fanout'"""
+
+    def __init__(self, conf, channel, topic, callback, tag, **kwargs):
+        """Init a 'fanout' queue.
+ + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + unique = uuid.uuid4().hex + exchange_name = '%s_fanout' % topic + queue_name = '%s_fanout_%s' % (topic, unique) + + # Default options + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=exchange_name, + type='fanout', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(FanoutConsumer, self).__init__( + channel, + callback, + tag, + name=queue_name, + exchange=exchange, + routing_key=topic, + **options) + + +class Publisher(object): + """Base Publisher class""" + + def __init__(self, channel, exchange_name, routing_key, **kwargs): + """Init the Publisher class with the exchange_name, routing_key, + and other options + """ + self.exchange_name = exchange_name + self.routing_key = routing_key + self.kwargs = kwargs + self.reconnect(channel) + + def reconnect(self, channel): + """Re-establish the Producer after a rabbit reconnection""" + self.exchange = kombu.entity.Exchange(name=self.exchange_name, + **self.kwargs) + self.producer = kombu.messaging.Producer(exchange=self.exchange, + channel=channel, routing_key=self.routing_key) + + def send(self, msg): + """Send a message""" + self.producer.publish(msg) + + +class DirectPublisher(Publisher): + """Publisher class for 'direct'""" + def __init__(self, conf, channel, msg_id, **kwargs): + """init a 'direct' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + super(DirectPublisher, self).__init__(channel, + msg_id, + msg_id, + type='direct', + **options) + + +class TopicPublisher(Publisher): + """Publisher class for 'topic'""" + def __init__(self, conf, channel, topic, **kwargs): + """init a 'topic' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': conf.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + super(TopicPublisher, self).__init__(channel, + conf.control_exchange, + topic, + type='topic', + **options) + + +class FanoutPublisher(Publisher): + """Publisher class for 'fanout'""" + def __init__(self, conf, channel, topic, **kwargs): + """init a 'fanout' publisher. 
+ + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + super(FanoutPublisher, self).__init__(channel, + '%s_fanout' % topic, + None, + type='fanout', + **options) + + +class NotifyPublisher(TopicPublisher): + """Publisher class for 'notify'""" + + def __init__(self, conf, channel, topic, **kwargs): + self.durable = kwargs.pop('durable', conf.rabbit_durable_queues) + super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs) + + def reconnect(self, channel): + super(NotifyPublisher, self).reconnect(channel) + + # NOTE(jerdfelt): Normally the consumer would create the queue, but + # we do this to ensure that messages don't get dropped if the + # consumer is started after we do + queue = kombu.entity.Queue(channel=channel, + exchange=self.exchange, + durable=self.durable, + name=self.routing_key, + routing_key=self.routing_key) + queue.declare() + + +class Connection(object): + """Connection object.""" + + pool = None + + def __init__(self, conf, server_params=None): + self.consumers = [] + self.consumer_thread = None + self.conf = conf + self.max_retries = self.conf.rabbit_max_retries + # Try forever? + if self.max_retries <= 0: + self.max_retries = None + self.interval_start = self.conf.rabbit_retry_interval + self.interval_stepping = self.conf.rabbit_retry_backoff + # max retry-interval = 30 seconds + self.interval_max = 30 + self.memory_transport = False + + if server_params is None: + server_params = {} + + # Keys to translate from server_params to kombu params + server_params_to_kombu_params = {'username': 'userid'} + + params = {} + for sp_key, value in server_params.iteritems(): + p_key = server_params_to_kombu_params.get(sp_key, sp_key) + params[p_key] = value + + params.setdefault('hostname', self.conf.rabbit_host) + params.setdefault('port', self.conf.rabbit_port) + params.setdefault('userid', self.conf.rabbit_userid) + params.setdefault('password', self.conf.rabbit_password) + params.setdefault('virtual_host', self.conf.rabbit_virtual_host) + + self.params = params + + if self.conf.fake_rabbit: + self.params['transport'] = 'memory' + self.memory_transport = True + else: + self.memory_transport = False + + if self.conf.rabbit_use_ssl: + self.params['ssl'] = self._fetch_ssl_params() + + self.connection = None + self.reconnect() + + def _fetch_ssl_params(self): + """Handles fetching what ssl params + should be used for the connection (if any)""" + ssl_params = dict() + + # http://docs.python.org/library/ssl.html - ssl.wrap_socket + if self.conf.kombu_ssl_version: + ssl_params['ssl_version'] = self.conf.kombu_ssl_version + if self.conf.kombu_ssl_keyfile: + ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile + if self.conf.kombu_ssl_certfile: + ssl_params['certfile'] = self.conf.kombu_ssl_certfile + if self.conf.kombu_ssl_ca_certs: + ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs + # We might want to allow variations in the + # future with this? + ssl_params['cert_reqs'] = ssl.CERT_REQUIRED + + if not ssl_params: + # Just have the default behavior + return True + else: + # Return the extended behavior + return ssl_params + + def _connect(self): + """Connect to rabbit. Re-establish any queues that may have + been declared before if we are reconnecting. Exceptions should + be handled by the caller. 
+ """ + if self.connection: + LOG.info(_("Reconnecting to AMQP server on " + "%(hostname)s:%(port)d") % self.params) + try: + self.connection.close() + except self.connection_errors: + pass + # Setting this in case the next statement fails, though + # it shouldn't be doing any network operations, yet. + self.connection = None + self.connection = kombu.connection.BrokerConnection( + **self.params) + self.connection_errors = self.connection.connection_errors + if self.memory_transport: + # Kludge to speed up tests. + self.connection.transport.polling_interval = 0.0 + self.consumer_num = itertools.count(1) + self.connection.connect() + self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') + for consumer in self.consumers: + consumer.reconnect(self.channel) + LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'), + self.params) + + def reconnect(self): + """Handles reconnecting and re-establishing queues. + Will retry up to self.max_retries number of times. + self.max_retries = 0 means to retry forever. + Sleep between tries, starting at self.interval_start + seconds, backing off self.interval_stepping number of seconds + each attempt. + """ + + attempt = 0 + while True: + attempt += 1 + try: + self._connect() + return + except (self.connection_errors, IOError), e: + pass + except Exception, e: + # NOTE(comstud): Unfortunately it's possible for amqplib + # to return an error not covered by its transport + # connection_errors in the case of a timeout waiting for + # a protocol response. (See paste link in LP888621) + # So, we check all exceptions for 'timeout' in them + # and try to reconnect in this case. + if 'timeout' not in str(e): + raise + + log_info = {} + log_info['err_str'] = str(e) + log_info['max_retries'] = self.max_retries + log_info.update(self.params) + + if self.max_retries and attempt == self.max_retries: + LOG.exception(_('Unable to connect to AMQP server on ' + '%(hostname)s:%(port)d after %(max_retries)d ' + 'tries: %(err_str)s') % log_info) + # NOTE(comstud): Copied from original code. There's + # really no better recourse because if this was a queue we + # need to consume on, we have no way to consume anymore. + sys.exit(1) + + if attempt == 1: + sleep_time = self.interval_start or 1 + elif attempt > 1: + sleep_time += self.interval_stepping + if self.interval_max: + sleep_time = min(sleep_time, self.interval_max) + + log_info['sleep_time'] = sleep_time + LOG.exception(_('AMQP server on %(hostname)s:%(port)d is' + ' unreachable: %(err_str)s. Trying again in ' + '%(sleep_time)d seconds.') % log_info) + time.sleep(sleep_time) + + def ensure(self, error_callback, method, *args, **kwargs): + while True: + try: + return method(*args, **kwargs) + except (self.connection_errors, socket.timeout, IOError), e: + pass + except Exception, e: + # NOTE(comstud): Unfortunately it's possible for amqplib + # to return an error not covered by its transport + # connection_errors in the case of a timeout waiting for + # a protocol response. (See paste link in LP888621) + # So, we check all exceptions for 'timeout' in them + # and try to reconnect in this case. 
+ if 'timeout' not in str(e): + raise + if error_callback: + error_callback(e) + self.reconnect() + + def get_channel(self): + """Convenience call for bin/clear_rabbit_queues""" + return self.channel + + def close(self): + """Close/release this connection""" + self.cancel_consumer_thread() + self.connection.release() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.cancel_consumer_thread() + self.channel.close() + self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') + self.consumers = [] + + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers + """ + + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.error(_("Failed to declare consumer for topic '%(topic)s': " + "%(err_str)s") % log_info) + + def _declare_consumer(): + consumer = consumer_cls(self.conf, self.channel, topic, callback, + self.consumer_num.next()) + self.consumers.append(consumer) + return consumer + + return self.ensure(_connect_error, _declare_consumer) + + def iterconsume(self, limit=None, timeout=None): + """Return an iterator that will consume from all queues/consumers""" + + info = {'do_consume': True} + + def _error_callback(exc): + if isinstance(exc, socket.timeout): + LOG.exception(_('Timed out waiting for RPC response: %s') % + str(exc)) + raise rpc_common.Timeout() + else: + LOG.exception(_('Failed to consume message from queue: %s') % + str(exc)) + info['do_consume'] = True + + def _consume(): + if info['do_consume']: + queues_head = self.consumers[:-1] + queues_tail = self.consumers[-1] + for queue in queues_head: + queue.consume(nowait=True) + queues_tail.consume(nowait=False) + info['do_consume'] = False + return self.connection.drain_events(timeout=timeout) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.ensure(_error_callback, _consume) + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread is not None: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + + def publisher_send(self, cls, topic, msg, **kwargs): + """Send to a publisher based on the publisher class""" + + def _error_callback(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.exception(_("Failed to publish message to topic " + "'%(topic)s': %(err_str)s") % log_info) + + def _publish(): + publisher = cls(self.conf, self.channel, topic, **kwargs) + publisher.send(msg) + + self.ensure(_error_callback, _publish) + + def declare_direct_consumer(self, topic, callback): + """Create a 'direct' queue. 
+        In cinder's use, this is generally a msg_id queue used for
+        responses for call/multicall
+        """
+        self.declare_consumer(DirectConsumer, topic, callback)
+
+    def declare_topic_consumer(self, topic, callback=None):
+        """Create a 'topic' consumer."""
+        self.declare_consumer(TopicConsumer, topic, callback)
+
+    def declare_fanout_consumer(self, topic, callback):
+        """Create a 'fanout' consumer"""
+        self.declare_consumer(FanoutConsumer, topic, callback)
+
+    def direct_send(self, msg_id, msg):
+        """Send a 'direct' message"""
+        self.publisher_send(DirectPublisher, msg_id, msg)
+
+    def topic_send(self, topic, msg):
+        """Send a 'topic' message"""
+        self.publisher_send(TopicPublisher, topic, msg)
+
+    def fanout_send(self, topic, msg):
+        """Send a 'fanout' message"""
+        self.publisher_send(FanoutPublisher, topic, msg)
+
+    def notify_send(self, topic, msg, **kwargs):
+        """Send a notify message on a topic"""
+        self.publisher_send(NotifyPublisher, topic, msg, **kwargs)
+
+    def consume(self, limit=None):
+        """Consume from all queues/consumers"""
+        it = self.iterconsume(limit=limit)
+        while True:
+            try:
+                it.next()
+            except StopIteration:
+                return
+
+    def consume_in_thread(self):
+        """Consume from all queues/consumers in a greenthread"""
+        def _consumer_thread():
+            try:
+                self.consume()
+            except greenlet.GreenletExit:
+                return
+        if self.consumer_thread is None:
+            self.consumer_thread = eventlet.spawn(_consumer_thread)
+        return self.consumer_thread
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        """Create a consumer that calls a method in a proxy object"""
+        proxy_cb = rpc_amqp.ProxyCallback(self.conf, proxy,
+                rpc_amqp.get_connection_pool(self.conf, Connection))
+
+        if fanout:
+            self.declare_fanout_consumer(topic, proxy_cb)
+        else:
+            self.declare_topic_consumer(topic, proxy_cb)
+
+
+def create_connection(conf, new=True):
+    """Create a connection"""
+    return rpc_amqp.create_connection(conf, new,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def multicall(conf, context, topic, msg, timeout=None):
+    """Make a call that returns multiple times."""
+    return rpc_amqp.multicall(conf, context, topic, msg, timeout,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def call(conf, context, topic, msg, timeout=None):
+    """Sends a message on a topic and waits for a response."""
+    return rpc_amqp.call(conf, context, topic, msg, timeout,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast(conf, context, topic, msg):
+    """Sends a message on a topic without waiting for a response."""
+    return rpc_amqp.cast(conf, context, topic, msg,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast(conf, context, topic, msg):
+    """Sends a message on a fanout exchange without waiting for a response."""
+    return rpc_amqp.fanout_cast(conf, context, topic, msg,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast_to_server(conf, context, server_params, topic, msg):
+    """Sends a message on a topic to a specific server."""
+    return rpc_amqp.cast_to_server(conf, context, server_params, topic, msg,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast_to_server(conf, context, server_params, topic, msg):
+    """Sends a message on a fanout exchange to a specific server."""
+    return rpc_amqp.fanout_cast_to_server(
+            conf, context, server_params, topic, msg,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def notify(conf, context, topic, msg):
+    """Sends a notification event on a topic."""
+    return rpc_amqp.notify(conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection)) + + +def cleanup(): + return rpc_amqp.cleanup(Connection.pool) + + +def register_opts(conf): + conf.register_opts(kombu_opts) diff --git a/cinder/rpc/impl_qpid.py b/cinder/rpc/impl_qpid.py new file mode 100644 index 00000000000..95ab00741e8 --- /dev/null +++ b/cinder/rpc/impl_qpid.py @@ -0,0 +1,563 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# Copyright 2011 - 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import itertools +import time +import uuid +import json + +import eventlet +import greenlet +import qpid.messaging +import qpid.messaging.exceptions + +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder.rpc import amqp as rpc_amqp +from cinder.rpc import common as rpc_common + +LOG = logging.getLogger(__name__) + +qpid_opts = [ + cfg.StrOpt('qpid_hostname', + default='localhost', + help='Qpid broker hostname'), + cfg.StrOpt('qpid_port', + default='5672', + help='Qpid broker port'), + cfg.StrOpt('qpid_username', + default='', + help='Username for qpid connection'), + cfg.StrOpt('qpid_password', + default='', + help='Password for qpid connection'), + cfg.StrOpt('qpid_sasl_mechanisms', + default='', + help='Space separated list of SASL mechanisms to use for auth'), + cfg.BoolOpt('qpid_reconnect', + default=True, + help='Automatically reconnect'), + cfg.IntOpt('qpid_reconnect_timeout', + default=0, + help='Reconnection timeout in seconds'), + cfg.IntOpt('qpid_reconnect_limit', + default=0, + help='Max reconnections before giving up'), + cfg.IntOpt('qpid_reconnect_interval_min', + default=0, + help='Minimum seconds between reconnection attempts'), + cfg.IntOpt('qpid_reconnect_interval_max', + default=0, + help='Maximum seconds between reconnection attempts'), + cfg.IntOpt('qpid_reconnect_interval', + default=0, + help='Equivalent to setting max and min to the same value'), + cfg.IntOpt('qpid_heartbeat', + default=5, + help='Seconds between connection keepalive heartbeats'), + cfg.StrOpt('qpid_protocol', + default='tcp', + help="Transport to use, either 'tcp' or 'ssl'"), + cfg.BoolOpt('qpid_tcp_nodelay', + default=True, + help='Disable Nagle algorithm'), + ] + + +class ConsumerBase(object): + """Consumer base class.""" + + def __init__(self, session, callback, node_name, node_opts, + link_name, link_opts): + """Declare a queue on an amqp session. + + 'session' is the amqp session to use + 'callback' is the callback to call when messages are received + 'node_name' is the first part of the Qpid address string, before ';' + 'node_opts' will be applied to the "x-declare" section of "node" + in the address string. + 'link_name' goes into the "name" field of the "link" in the address + string + 'link_opts' will be applied to the "x-declare" section of "link" + in the address string. 
+ """ + self.callback = callback + self.receiver = None + self.session = None + + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": True, + "auto-delete": True, + }, + }, + "link": { + "name": link_name, + "durable": True, + "x-declare": { + "durable": False, + "auto-delete": True, + "exclusive": False, + }, + }, + } + addr_opts["node"]["x-declare"].update(node_opts) + addr_opts["link"]["x-declare"].update(link_opts) + + self.address = "%s ; %s" % (node_name, json.dumps(addr_opts)) + + self.reconnect(session) + + def reconnect(self, session): + """Re-declare the receiver after a qpid reconnect""" + self.session = session + self.receiver = session.receiver(self.address) + self.receiver.capacity = 1 + + def consume(self): + """Fetch the message and pass it to the callback object""" + message = self.receiver.fetch() + self.callback(message.content) + + def get_receiver(self): + return self.receiver + + +class DirectConsumer(ConsumerBase): + """Queue/consumer class for 'direct'""" + + def __init__(self, conf, session, msg_id, callback): + """Init a 'direct' queue. + + 'session' is the amqp session to use + 'msg_id' is the msg_id to listen on + 'callback' is the callback to call when messages are received + """ + + super(DirectConsumer, self).__init__(session, callback, + "%s/%s" % (msg_id, msg_id), + {"type": "direct"}, + msg_id, + {"exclusive": True}) + + +class TopicConsumer(ConsumerBase): + """Consumer class for 'topic'""" + + def __init__(self, conf, session, topic, callback): + """Init a 'topic' queue. + + 'session' is the amqp session to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + """ + + super(TopicConsumer, self).__init__(session, callback, + "%s/%s" % (conf.control_exchange, topic), {}, + topic, {}) + + +class FanoutConsumer(ConsumerBase): + """Consumer class for 'fanout'""" + + def __init__(self, conf, session, topic, callback): + """Init a 'fanout' queue. 
+ + 'session' is the amqp session to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + """ + + super(FanoutConsumer, self).__init__(session, callback, + "%s_fanout" % topic, + {"durable": False, "type": "fanout"}, + "%s_fanout_%s" % (topic, uuid.uuid4().hex), + {"exclusive": True}) + + +class Publisher(object): + """Base Publisher class""" + + def __init__(self, session, node_name, node_opts=None): + """Init the Publisher class with the exchange_name, routing_key, + and other options + """ + self.sender = None + self.session = session + + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": False, + # auto-delete isn't implemented for exchanges in qpid, + # but put in here anyway + "auto-delete": True, + }, + }, + } + if node_opts: + addr_opts["node"]["x-declare"].update(node_opts) + + self.address = "%s ; %s" % (node_name, json.dumps(addr_opts)) + + self.reconnect(session) + + def reconnect(self, session): + """Re-establish the Sender after a reconnection""" + self.sender = session.sender(self.address) + + def send(self, msg): + """Send a message""" + self.sender.send(msg) + + +class DirectPublisher(Publisher): + """Publisher class for 'direct'""" + def __init__(self, conf, session, msg_id): + """Init a 'direct' publisher.""" + super(DirectPublisher, self).__init__(session, msg_id, + {"type": "Direct"}) + + +class TopicPublisher(Publisher): + """Publisher class for 'topic'""" + def __init__(self, conf, session, topic): + """init a 'topic' publisher. + """ + super(TopicPublisher, self).__init__(session, + "%s/%s" % (conf.control_exchange, topic)) + + +class FanoutPublisher(Publisher): + """Publisher class for 'fanout'""" + def __init__(self, conf, session, topic): + """init a 'fanout' publisher. + """ + super(FanoutPublisher, self).__init__(session, + "%s_fanout" % topic, {"type": "fanout"}) + + +class NotifyPublisher(Publisher): + """Publisher class for notifications""" + def __init__(self, conf, session, topic): + """init a 'topic' publisher. 
+ """ + super(NotifyPublisher, self).__init__(session, + "%s/%s" % (conf.control_exchange, topic), + {"durable": True}) + + +class Connection(object): + """Connection object.""" + + pool = None + + def __init__(self, conf, server_params=None): + self.session = None + self.consumers = {} + self.consumer_thread = None + self.conf = conf + + if server_params is None: + server_params = {} + + default_params = dict(hostname=self.conf.qpid_hostname, + port=self.conf.qpid_port, + username=self.conf.qpid_username, + password=self.conf.qpid_password) + + params = server_params + for key in default_params.keys(): + params.setdefault(key, default_params[key]) + + self.broker = params['hostname'] + ":" + str(params['port']) + # Create the connection - this does not open the connection + self.connection = qpid.messaging.Connection(self.broker) + + # Check if flags are set and if so set them for the connection + # before we call open + self.connection.username = params['username'] + self.connection.password = params['password'] + self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms + self.connection.reconnect = self.conf.qpid_reconnect + if self.conf.qpid_reconnect_timeout: + self.connection.reconnect_timeout = ( + self.conf.qpid_reconnect_timeout) + if self.conf.qpid_reconnect_limit: + self.connection.reconnect_limit = self.conf.qpid_reconnect_limit + if self.conf.qpid_reconnect_interval_max: + self.connection.reconnect_interval_max = ( + self.conf.qpid_reconnect_interval_max) + if self.conf.qpid_reconnect_interval_min: + self.connection.reconnect_interval_min = ( + self.conf.qpid_reconnect_interval_min) + if self.conf.qpid_reconnect_interval: + self.connection.reconnect_interval = ( + self.conf.qpid_reconnect_interval) + self.connection.hearbeat = self.conf.qpid_heartbeat + self.connection.protocol = self.conf.qpid_protocol + self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay + + # Open is part of reconnect - + # NOTE(WGH) not sure we need this with the reconnect flags + self.reconnect() + + def _register_consumer(self, consumer): + self.consumers[str(consumer.get_receiver())] = consumer + + def _lookup_consumer(self, receiver): + return self.consumers[str(receiver)] + + def reconnect(self): + """Handles reconnecting and re-establishing sessions and queues""" + if self.connection.opened(): + try: + self.connection.close() + except qpid.messaging.exceptions.ConnectionError: + pass + + while True: + try: + self.connection.open() + except qpid.messaging.exceptions.ConnectionError, e: + LOG.error(_('Unable to connect to AMQP server: %s'), e) + time.sleep(self.conf.qpid_reconnect_interval or 1) + else: + break + + LOG.info(_('Connected to AMQP server on %s'), self.broker) + + self.session = self.connection.session() + + for consumer in self.consumers.itervalues(): + consumer.reconnect(self.session) + + if self.consumers: + LOG.debug(_("Re-established AMQP queues")) + + def ensure(self, error_callback, method, *args, **kwargs): + while True: + try: + return method(*args, **kwargs) + except (qpid.messaging.exceptions.Empty, + qpid.messaging.exceptions.ConnectionError), e: + if error_callback: + error_callback(e) + self.reconnect() + + def close(self): + """Close/release this connection""" + self.cancel_consumer_thread() + self.connection.close() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.cancel_consumer_thread() + self.session.close() + self.session = self.connection.session() + self.consumers = {} + + def declare_consumer(self, 
consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers + """ + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.error(_("Failed to declare consumer for topic '%(topic)s': " + "%(err_str)s") % log_info) + + def _declare_consumer(): + consumer = consumer_cls(self.conf, self.session, topic, callback) + self._register_consumer(consumer) + return consumer + + return self.ensure(_connect_error, _declare_consumer) + + def iterconsume(self, limit=None, timeout=None): + """Return an iterator that will consume from all queues/consumers""" + + def _error_callback(exc): + if isinstance(exc, qpid.messaging.exceptions.Empty): + LOG.exception(_('Timed out waiting for RPC response: %s') % + str(exc)) + raise rpc_common.Timeout() + else: + LOG.exception(_('Failed to consume message from queue: %s') % + str(exc)) + + def _consume(): + nxt_receiver = self.session.next_receiver(timeout=timeout) + try: + self._lookup_consumer(nxt_receiver).consume() + except Exception: + LOG.exception(_("Error processing message. Skipping it.")) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.ensure(_error_callback, _consume) + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread is not None: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + + def publisher_send(self, cls, topic, msg): + """Send to a publisher based on the publisher class""" + + def _connect_error(exc): + log_info = {'topic': topic, 'err_str': str(exc)} + LOG.exception(_("Failed to publish message to topic " + "'%(topic)s': %(err_str)s") % log_info) + + def _publisher_send(): + publisher = cls(self.conf, self.session, topic) + publisher.send(msg) + + return self.ensure(_connect_error, _publisher_send) + + def declare_direct_consumer(self, topic, callback): + """Create a 'direct' queue. 
+        In cinder's use, this is generally a msg_id queue used for
+        responses for call/multicall
+        """
+        self.declare_consumer(DirectConsumer, topic, callback)
+
+    def declare_topic_consumer(self, topic, callback=None):
+        """Create a 'topic' consumer."""
+        self.declare_consumer(TopicConsumer, topic, callback)
+
+    def declare_fanout_consumer(self, topic, callback):
+        """Create a 'fanout' consumer"""
+        self.declare_consumer(FanoutConsumer, topic, callback)
+
+    def direct_send(self, msg_id, msg):
+        """Send a 'direct' message"""
+        self.publisher_send(DirectPublisher, msg_id, msg)
+
+    def topic_send(self, topic, msg):
+        """Send a 'topic' message"""
+        self.publisher_send(TopicPublisher, topic, msg)
+
+    def fanout_send(self, topic, msg):
+        """Send a 'fanout' message"""
+        self.publisher_send(FanoutPublisher, topic, msg)
+
+    def notify_send(self, topic, msg, **kwargs):
+        """Send a notify message on a topic"""
+        self.publisher_send(NotifyPublisher, topic, msg)
+
+    def consume(self, limit=None):
+        """Consume from all queues/consumers"""
+        it = self.iterconsume(limit=limit)
+        while True:
+            try:
+                it.next()
+            except StopIteration:
+                return
+
+    def consume_in_thread(self):
+        """Consume from all queues/consumers in a greenthread"""
+        def _consumer_thread():
+            try:
+                self.consume()
+            except greenlet.GreenletExit:
+                return
+        if self.consumer_thread is None:
+            self.consumer_thread = eventlet.spawn(_consumer_thread)
+        return self.consumer_thread
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        """Create a consumer that calls a method in a proxy object"""
+        proxy_cb = rpc_amqp.ProxyCallback(self.conf, proxy,
+                rpc_amqp.get_connection_pool(self.conf, Connection))
+
+        if fanout:
+            consumer = FanoutConsumer(self.conf, self.session, topic,
+                                      proxy_cb)
+        else:
+            consumer = TopicConsumer(self.conf, self.session, topic,
+                                     proxy_cb)
+
+        self._register_consumer(consumer)
+
+        return consumer
+
+
+def create_connection(conf, new=True):
+    """Create a connection"""
+    return rpc_amqp.create_connection(conf, new,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def multicall(conf, context, topic, msg, timeout=None):
+    """Make a call that returns multiple times."""
+    return rpc_amqp.multicall(conf, context, topic, msg, timeout,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def call(conf, context, topic, msg, timeout=None):
+    """Sends a message on a topic and waits for a response."""
+    return rpc_amqp.call(conf, context, topic, msg, timeout,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast(conf, context, topic, msg):
+    """Sends a message on a topic without waiting for a response."""
+    return rpc_amqp.cast(conf, context, topic, msg,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast(conf, context, topic, msg):
+    """Sends a message on a fanout exchange without waiting for a response."""
+    return rpc_amqp.fanout_cast(conf, context, topic, msg,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast_to_server(conf, context, server_params, topic, msg):
+    """Sends a message on a topic to a specific server."""
+    return rpc_amqp.cast_to_server(conf, context, server_params, topic, msg,
+            rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast_to_server(conf, context, server_params, topic, msg):
+    """Sends a message on a fanout exchange to a specific server."""
+    return rpc_amqp.fanout_cast_to_server(conf, context, server_params, topic,
+            msg, rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def notify(conf, context, topic, msg):
+    """Sends a notification event on
a topic.""" + return rpc_amqp.notify(conf, context, topic, msg, + rpc_amqp.get_connection_pool(conf, Connection)) + + +def cleanup(): + return rpc_amqp.cleanup(Connection.pool) + + +def register_opts(conf): + conf.register_opts(qpid_opts) diff --git a/cinder/scheduler/__init__.py b/cinder/scheduler/__init__.py new file mode 100644 index 00000000000..ecc937ab1c9 --- /dev/null +++ b/cinder/scheduler/__init__.py @@ -0,0 +1,27 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder.scheduler` -- Scheduler Nodes +===================================================== + +.. automodule:: cinder.scheduler + :platform: Unix + :synopsis: Module that picks a compute node to run a VM instance. +.. moduleauthor:: Sandy Walsh +.. moduleauthor:: Ed Leafe +.. moduleauthor:: Chris Behrens +""" diff --git a/cinder/scheduler/api.py b/cinder/scheduler/api.py new file mode 100644 index 00000000000..c7065c5ccda --- /dev/null +++ b/cinder/scheduler/api.py @@ -0,0 +1,72 @@ +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to schedulers. +""" + +from cinder import flags +from cinder import log as logging +from cinder import rpc + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +def _call_scheduler(method, context, params=None): + """Generic handler for RPC calls to the scheduler. 
+ + :param params: Optional dictionary of arguments to be passed to the + scheduler worker + + :retval: Result returned by scheduler worker + """ + if not params: + params = {} + queue = FLAGS.scheduler_topic + kwargs = {'method': method, 'args': params} + return rpc.call(context, queue, kwargs) + + +def get_host_list(context): + """Return a list of hosts associated with this zone.""" + return _call_scheduler('get_host_list', context) + + +def get_service_capabilities(context): + """Return aggregated capabilities for all services.""" + return _call_scheduler('get_service_capabilities', context) + + +def update_service_capabilities(context, service_name, host, capabilities): + """Send an update to all the scheduler services informing them + of the capabilities of this service.""" + kwargs = dict(method='update_service_capabilities', + args=dict(service_name=service_name, host=host, + capabilities=capabilities)) + return rpc.fanout_cast(context, 'scheduler', kwargs) + + +def live_migration(context, block_migration, disk_over_commit, + instance_id, dest, topic): + """Migrate a server to a new host""" + params = {"instance_id": instance_id, + "dest": dest, + "topic": topic, + "block_migration": block_migration, + "disk_over_commit": disk_over_commit} + # NOTE(comstud): Call vs cast so we can get exceptions back, otherwise + # this call in the scheduler driver doesn't return anything. + _call_scheduler("live_migration", context=context, params=params) diff --git a/cinder/scheduler/chance.py b/cinder/scheduler/chance.py new file mode 100644 index 00000000000..064c44a91cb --- /dev/null +++ b/cinder/scheduler/chance.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
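The scheduler API above reduces every scheduler request to a `{'method': ..., 'args': ...}` envelope published on the `scheduler_topic` queue (capability updates are fanout-cast to every scheduler instead). Using `call` rather than `cast`, as the NOTE on `live_migration` points out, is what lets exceptions raised in the scheduler propagate back to the caller. A minimal sketch of the envelope-building pattern, with a stubbed-out RPC call standing in for cinder's real RPC layer (all names here are illustrative, not cinder's API)::

    def fake_rpc_call(context, queue, envelope):
        # Stand-in for cinder.rpc.call; just shows what would hit the wire.
        print('queue=%r method=%r args=%r'
              % (queue, envelope['method'], envelope['args']))
        return []

    def call_scheduler(method, context, params=None):
        # Mirrors _call_scheduler: a method name plus a dict of keyword
        # arguments for the scheduler worker.
        return fake_rpc_call(context, 'scheduler',
                             {'method': method, 'args': params or {}})

    call_scheduler('get_host_list', context=None)
    # -> queue='scheduler' method='get_host_list' args={}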
+ +""" +Chance (Random) Scheduler implementation +""" + +import random + +from cinder import exception +from cinder.scheduler import driver + + +class ChanceScheduler(driver.Scheduler): + """Implements Scheduler as a random node selector.""" + + def _filter_hosts(self, request_spec, hosts, **kwargs): + """Filter a list of hosts based on request_spec.""" + + filter_properties = kwargs.get('filter_properties', {}) + ignore_hosts = filter_properties.get('ignore_hosts', []) + hosts = [host for host in hosts if host not in ignore_hosts] + return hosts + + def _schedule(self, context, topic, request_spec, **kwargs): + """Picks a host that is up at random.""" + + elevated = context.elevated() + hosts = self.hosts_up(elevated, topic) + if not hosts: + msg = _("Is the appropriate service running?") + raise exception.NoValidHost(reason=msg) + + hosts = self._filter_hosts(request_spec, hosts, **kwargs) + if not hosts: + msg = _("Could not find another compute host") + raise exception.NoValidHost(reason=msg) + + return hosts[int(random.random() * len(hosts))] + + def schedule(self, context, topic, method, *_args, **kwargs): + """Picks a host that is up at random.""" + + host = self._schedule(context, topic, None, **kwargs) + driver.cast_to_host(context, topic, host, method, **kwargs) + + def schedule_run_instance(self, context, request_spec, *_args, **kwargs): + """Create and run an instance or instances.""" + num_instances = request_spec.get('num_instances', 1) + instances = [] + for num in xrange(num_instances): + host = self._schedule(context, 'compute', request_spec, **kwargs) + request_spec['instance_properties']['launch_index'] = num + instance = self.create_instance_db_entry(context, request_spec) + driver.cast_to_compute_host(context, host, + 'run_instance', instance_uuid=instance['uuid'], **kwargs) + instances.append(driver.encode_instance(instance)) + # So if we loop around, create_instance_db_entry will actually + # create a new entry, instead of assuming it's been created + # already + del request_spec['instance_properties']['uuid'] + return instances + + def schedule_prep_resize(self, context, request_spec, *args, **kwargs): + """Select a target for resize.""" + host = self._schedule(context, 'compute', request_spec, **kwargs) + driver.cast_to_compute_host(context, host, 'prep_resize', **kwargs) diff --git a/cinder/scheduler/chance.py b/cinder/scheduler/driver.py new file mode 100644 index 00000000000..d84893ee6f2 --- /dev/null +++ b/cinder/scheduler/driver.py @@ -0,0 +1,164 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
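The chance scheduler's host selection above boils down to two steps: drop any hosts the caller asked to ignore, then pick uniformly at random; `hosts[int(random.random() * len(hosts))]` is equivalent to `random.choice(hosts)`. A self-contained sketch of the same logic, with made-up host names::

    import random

    def filter_hosts(hosts, filter_properties=None):
        # Same filtering rule as ChanceScheduler._filter_hosts.
        ignore_hosts = (filter_properties or {}).get('ignore_hosts', [])
        return [host for host in hosts if host not in ignore_hosts]

    hosts = filter_hosts(['vol1', 'vol2', 'vol3'],
                         {'ignore_hosts': ['vol2']})
    if not hosts:
        raise RuntimeError('no valid host')  # NoValidHost in cinder
    print(hosts[int(random.random() * len(hosts))])  # 'vol1' or 'vol3'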
+ +""" +Scheduler base class that all Schedulers should inherit from +""" + +from cinder import db +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder.openstack.common import importutils +from cinder import rpc +from cinder.rpc import common as rpc_common +from cinder import utils + + +LOG = logging.getLogger(__name__) + +scheduler_driver_opts = [ + cfg.StrOpt('scheduler_host_manager', + default='cinder.scheduler.host_manager.HostManager', + help='The scheduler host manager class to use'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(scheduler_driver_opts) + + +def cast_to_volume_host(context, host, method, update_db=True, **kwargs): + """Cast request to a volume host queue""" + + if update_db: + volume_id = kwargs.get('volume_id', None) + if volume_id is not None: + now = utils.utcnow() + db.volume_update(context, volume_id, + {'host': host, 'scheduled_at': now}) + rpc.cast(context, + db.queue_get_for(context, 'volume', host), + {"method": method, "args": kwargs}) + LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals()) + + +def cast_to_host(context, topic, host, method, update_db=True, **kwargs): + """Generic cast to host""" + + topic_mapping = { + "volume": cast_to_volume_host} + + func = topic_mapping.get(topic) + if func: + func(context, host, method, update_db=update_db, **kwargs) + else: + rpc.cast(context, + db.queue_get_for(context, topic, host), + {"method": method, "args": kwargs}) + LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'") + % locals()) + + +def encode_instance(instance, local=True): + """Encode locally created instance for return via RPC""" + # TODO(comstud): I would love to be able to return the full + # instance information here, but we'll need some modifications + # to the RPC code to handle datetime conversions with the + # json encoding/decoding. We should be able to set a default + # json handler somehow to do it. + # + # For now, I'll just return the instance ID and let the caller + # do a DB lookup :-/ + if local: + return dict(id=instance['id'], _is_precooked=False) + else: + inst = dict(instance) + inst['_is_precooked'] = True + return inst + + +class Scheduler(object): + """The base class that all Scheduler classes should inherit from.""" + + def __init__(self): + self.host_manager = importutils.import_object( + FLAGS.scheduler_host_manager) + + def get_host_list(self): + """Get a list of hosts from the HostManager.""" + return self.host_manager.get_host_list() + + def get_service_capabilities(self): + """Get the normalized set of capabilities for the services. 
+ """ + return self.host_manager.get_service_capabilities() + + def update_service_capabilities(self, service_name, host, capabilities): + """Process a capability update from a service node.""" + self.host_manager.update_service_capabilities(service_name, + host, capabilities) + + def hosts_up(self, context, topic): + """Return the list of hosts that have a running service for topic.""" + + services = db.service_get_all_by_topic(context, topic) + return [service['host'] + for service in services + if utils.service_is_up(service)] + + def schedule(self, context, topic, method, *_args, **_kwargs): + """Must override schedule method for scheduler to work.""" + raise NotImplementedError(_("Must implement a fallback schedule")) + + def schedule_prep_resize(self, context, request_spec, *_args, **_kwargs): + """Must override schedule_prep_resize method for scheduler to work.""" + msg = _("Driver must implement schedule_prep_resize") + raise NotImplementedError(msg) + + def mounted_on_same_shared_storage(self, context, instance_ref, dest): + """Check if the src and dest host mount same shared storage. + + At first, dest host creates temp file, and src host can see + it if they mounts same shared storage. Then src host erase it. + + :param context: security context + :param instance_ref: cinder.db.sqlalchemy.models.Instance object + :param dest: destination host + + """ + + src = instance_ref['host'] + dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest) + src_t = db.queue_get_for(context, FLAGS.compute_topic, src) + + filename = rpc.call(context, dst_t, + {"method": 'create_shared_storage_test_file'}) + + try: + # make sure existence at src host. + ret = rpc.call(context, src_t, + {"method": 'check_shared_storage_test_file', + "args": {'filename': filename}}) + + finally: + rpc.cast(context, dst_t, + {"method": 'cleanup_shared_storage_test_file', + "args": {'filename': filename}}) + + return ret diff --git a/cinder/scheduler/host_manager.py b/cinder/scheduler/host_manager.py new file mode 100644 index 00000000000..99632bc8691 --- /dev/null +++ b/cinder/scheduler/host_manager.py @@ -0,0 +1,36 @@ +# Copyright (c) 2011 OpenStack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Manage hosts in the current zone. +""" + +# FIXME(ja): this code was written only for compute. re-implement for volumes + + +class HostState(object): + pass + + +class HostManager(object): + + def get_host_list(self, *args): + pass + + def update_service_capabilities(self, *args): + pass + + def get_service_capabilities(self, *args): + pass diff --git a/cinder/scheduler/manager.py b/cinder/scheduler/manager.py new file mode 100644 index 00000000000..35e73db4685 --- /dev/null +++ b/cinder/scheduler/manager.py @@ -0,0 +1,204 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Scheduler Service +""" + +import functools + +from cinder import db +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import manager +from cinder.notifier import api as notifier +from cinder.openstack.common import cfg +from cinder.openstack.common import importutils +from cinder import utils + + +LOG = logging.getLogger(__name__) + +scheduler_driver_opt = cfg.StrOpt('scheduler_driver', + default='cinder.scheduler.simple.SimpleScheduler', + help='Default driver to use for the scheduler') + +FLAGS = flags.FLAGS +FLAGS.register_opt(scheduler_driver_opt) + + +class SchedulerManager(manager.Manager): + """Chooses a host to run instances on.""" + + def __init__(self, scheduler_driver=None, *args, **kwargs): + if not scheduler_driver: + scheduler_driver = FLAGS.scheduler_driver + self.driver = importutils.import_object(scheduler_driver) + super(SchedulerManager, self).__init__(*args, **kwargs) + + def __getattr__(self, key): + """Converts all method calls to use the schedule method""" + return functools.partial(self._schedule, key) + + def get_host_list(self, context): + """Get a list of hosts from the HostManager.""" + return self.driver.get_host_list() + + def get_service_capabilities(self, context): + """Get the normalized set of capabilities for this zone.""" + return self.driver.get_service_capabilities() + + def update_service_capabilities(self, context, service_name=None, + host=None, capabilities=None, **kwargs): + """Process a capability update from a service node.""" + if capabilities is None: + capabilities = {} + self.driver.update_service_capabilities(service_name, host, + capabilities) + + def _schedule(self, method, context, topic, *args, **kwargs): + """Tries to call schedule_* method on the driver to retrieve host. + Falls back to schedule(context, topic) if method doesn't exist. + """ + driver_method_name = 'schedule_%s' % method + try: + driver_method = getattr(self.driver, driver_method_name) + args = (context,) + args + except AttributeError, e: + LOG.warning(_("Driver Method %(driver_method_name)s missing: " + "%(e)s. Reverting to schedule()") % locals()) + driver_method = self.driver.schedule + args = (context, topic, method) + args + + # Scheduler methods are responsible for casting. + try: + return driver_method(*args, **kwargs) + except Exception as ex: + with utils.save_and_reraise_exception(): + self._set_vm_state_and_notify(method, + {'vm_state': vm_states.ERROR}, + context, ex, *args, **kwargs) + + def run_instance(self, context, topic, *args, **kwargs): + """Tries to call schedule_run_instance on the driver. 
+ Sets instance vm_state to ERROR on exceptions + """ + args = (context,) + args + try: + return self.driver.schedule_run_instance(*args, **kwargs) + except exception.NoValidHost as ex: + # don't reraise + self._set_vm_state_and_notify('run_instance', + {'vm_state': vm_states.ERROR}, + context, ex, *args, **kwargs) + except Exception as ex: + with utils.save_and_reraise_exception(): + self._set_vm_state_and_notify('run_instance', + {'vm_state': vm_states.ERROR}, + context, ex, *args, **kwargs) + + def prep_resize(self, context, topic, *args, **kwargs): + """Tries to call schedule_prep_resize on the driver. + Sets instance vm_state to ACTIVE on NoHostFound + Sets vm_state to ERROR on other exceptions + """ + args = (context,) + args + try: + return self.driver.schedule_prep_resize(*args, **kwargs) + except exception.NoValidHost as ex: + self._set_vm_state_and_notify('prep_resize', + {'vm_state': vm_states.ACTIVE, + 'task_state': None}, + context, ex, *args, **kwargs) + except Exception as ex: + with utils.save_and_reraise_exception(): + self._set_vm_state_and_notify('prep_resize', + {'vm_state': vm_states.ERROR}, + context, ex, *args, **kwargs) + + def _set_vm_state_and_notify(self, method, updates, context, ex, + *args, **kwargs): + """changes VM state and notifies""" + # FIXME(comstud): Re-factor this somehow. Not sure this belongs in the + # scheduler manager like this. We should make this easier. + # run_instance only sends a request_spec, and an instance may or may + # not have been created in the API (or scheduler) already. If it was + # created, there's a 'uuid' set in the instance_properties of the + # request_spec. + # (littleidea): I refactored this a bit, and I agree + # it should be easier :) + # The refactoring could go further but trying to minimize changes + # for essex timeframe + + LOG.warning(_("Failed to schedule_%(method)s: %(ex)s") % locals()) + + vm_state = updates['vm_state'] + request_spec = kwargs.get('request_spec', {}) + properties = request_spec.get('instance_properties', {}) + instance_uuid = properties.get('uuid', {}) + + if instance_uuid: + state = vm_state.upper() + LOG.warning(_('Setting instance to %(state)s state.'), locals(), + instance_uuid=instance_uuid) + db.instance_update(context, instance_uuid, updates) + + payload = dict(request_spec=request_spec, + instance_properties=properties, + instance_id=instance_uuid, + state=vm_state, + method=method, + reason=ex) + + notifier.notify(notifier.publisher_id("scheduler"), + 'scheduler.' + method, notifier.ERROR, payload) + + # NOTE (masumotok) : This method should be moved to cinder.api.ec2.admin. + # Based on bexar design summit discussion, + # just put this here for bexar release. + def show_host_resources(self, context, host): + """Shows the physical/usage resource given by hosts. 
+ + :param context: security context + :param host: hostname + :returns: + example format is below:: + + {'resource':D, 'usage':{proj_id1:D, proj_id2:D}} + D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048, + 'vcpus_used': 12, 'memory_mb_used': 10240, + 'local_gb_used': 64} + + """ + # Getting compute node info and related instances info + compute_ref = db.service_get_all_compute_by_host(context, host) + compute_ref = compute_ref[0] + + # Getting total available/used resource + compute_ref = compute_ref['compute_node'][0] + resource = {'vcpus': compute_ref['vcpus'], + 'memory_mb': compute_ref['memory_mb'], + 'local_gb': compute_ref['local_gb'], + 'vcpus_used': compute_ref['vcpus_used'], + 'memory_mb_used': compute_ref['memory_mb_used'], + 'local_gb_used': compute_ref['local_gb_used']} + usage = dict() + + return {'resource': resource, 'usage': usage} diff --git a/cinder/scheduler/simple.py b/cinder/scheduler/simple.py new file mode 100644 index 00000000000..8849696f342 --- /dev/null +++ b/cinder/scheduler/simple.py @@ -0,0 +1,144 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 OpenStack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
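The most interesting piece of the scheduler manager above is its `__getattr__` hook: any RPC method the manager does not define explicitly becomes a partially-applied call to `_schedule`, which looks for a matching `schedule_<method>` on the driver and falls back to the generic `schedule()`. A stripped-down sketch of that dispatch pattern (the class and method names here are illustrative, not cinder's)::

    import functools

    class FakeDriver(object):
        def schedule(self, context, topic, method, *args, **kwargs):
            return 'generic schedule for %s' % method

        def schedule_create_volume(self, context, volume_id):
            return 'host chosen for volume %s' % volume_id

    class FakeManager(object):
        def __init__(self):
            self.driver = FakeDriver()

        def __getattr__(self, key):
            # Unknown attributes become partially-applied _schedule
            # calls, so manager.create_volume(ctxt, topic, ...) works.
            return functools.partial(self._schedule, key)

        def _schedule(self, method, context, topic, *args, **kwargs):
            driver_method = getattr(self.driver, 'schedule_%s' % method,
                                    None)
            if driver_method is None:
                return self.driver.schedule(context, topic, method,
                                            *args, **kwargs)
            return driver_method(context, *args, **kwargs)

    manager = FakeManager()
    print(manager.create_volume(None, 'volume', volume_id='vol-1'))
    # -> host chosen for volume vol-1
    print(manager.frobnicate(None, 'volume'))
    # -> generic schedule for frobnicate

(The real `_schedule` assembles its argument tuple slightly differently on the fallback path, but the dispatch idea is the same.)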
+ +""" +Simple Scheduler +""" + +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import cfg +from cinder.scheduler import chance +from cinder.scheduler import driver +from cinder import utils + + +simple_scheduler_opts = [ + cfg.IntOpt("max_cores", + default=16, + help="maximum number of instance cores to allow per host"), + cfg.IntOpt("max_gigabytes", + default=10000, + help="maximum number of volume gigabytes to allow per host"), + cfg.IntOpt("max_networks", + default=1000, + help="maximum number of networks to allow per host"), + cfg.BoolOpt('skip_isolated_core_check', + default=True, + help='Allow overcommitting vcpus on isolated hosts'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(simple_scheduler_opts) + + +class SimpleScheduler(chance.ChanceScheduler): + """Implements a naive scheduler that tries to find the least loaded host.""" + + def _schedule_instance(self, context, instance_opts, *_args, **_kwargs): + """Picks a host that is up and has the fewest running instances.""" + elevated = context.elevated() + + availability_zone = instance_opts.get('availability_zone') + + zone, host = FLAGS.default_schedule_zone, None + if availability_zone: + zone, _x, host = availability_zone.partition(':') + + if host and context.is_admin: + service = db.service_get_by_args(elevated, host, 'cinder-compute') + if not utils.service_is_up(service): + raise exception.WillNotSchedule(host=host) + return host + + results = db.service_get_all_compute_sorted(elevated) + in_isolation = instance_opts['image_ref'] in FLAGS.isolated_images + check_cores = not in_isolation or not FLAGS.skip_isolated_core_check + if zone: + results = [(service, cores) for (service, cores) in results + if service['availability_zone'] == zone] + for result in results: + (service, instance_cores) = result + if in_isolation and service['host'] not in FLAGS.isolated_hosts: + # isolated images run on isolated hosts + continue + if service['host'] in FLAGS.isolated_hosts and not in_isolation: + # images that aren't isolated only run on general hosts + continue + if (check_cores and + instance_cores + instance_opts['vcpus'] > FLAGS.max_cores): + msg = _("Not enough allocatable CPU cores remaining") + raise exception.NoValidHost(reason=msg) + if utils.service_is_up(service) and not service['disabled']: + return service['host'] + msg = _("Is the appropriate service running?") + raise exception.NoValidHost(reason=msg) + + def schedule_run_instance(self, context, request_spec, *_args, **_kwargs): + num_instances = request_spec.get('num_instances', 1) + instances = [] + for num in xrange(num_instances): + host = self._schedule_instance(context, + request_spec['instance_properties'], *_args, **_kwargs) + request_spec['instance_properties']['launch_index'] = num + instance_ref = self.create_instance_db_entry(context, + request_spec) + driver.cast_to_compute_host(context, host, 'run_instance', + instance_uuid=instance_ref['uuid'], **_kwargs) + instances.append(driver.encode_instance(instance_ref)) + # So if we loop around, create_instance_db_entry will actually + # create a new entry, instead of assuming it's been created + # already + del request_spec['instance_properties']['uuid'] + return instances + + def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): + """Picks a host that is up and has the fewest volumes.""" + elevated = context.elevated() + + volume_ref = db.volume_get(context, volume_id) + availability_zone = volume_ref.get('availability_zone') + + zone, host
= None, None + if availability_zone: + zone, _x, host = availability_zone.partition(':') + if host and context.is_admin: + service = db.service_get_by_args(elevated, host, 'cinder-volume') + if not utils.service_is_up(service): + raise exception.WillNotSchedule(host=host) + driver.cast_to_volume_host(context, host, 'create_volume', + volume_id=volume_id, **_kwargs) + return None + + results = db.service_get_all_volume_sorted(elevated) + if zone: + results = [(service, gigs) for (service, gigs) in results + if service['availability_zone'] == zone] + for result in results: + (service, volume_gigabytes) = result + if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes: + msg = _("Not enough allocatable volume gigabytes remaining") + raise exception.NoValidHost(reason=msg) + if utils.service_is_up(service) and not service['disabled']: + driver.cast_to_volume_host(context, service['host'], + 'create_volume', volume_id=volume_id, **_kwargs) + return None + msg = _("Is the appropriate service running?") + raise exception.NoValidHost(reason=msg) diff --git a/cinder/service.py b/cinder/service.py new file mode 100644 index 00000000000..363b3d967a2 --- /dev/null +++ b/cinder/service.py @@ -0,0 +1,429 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Generic Node base class for all workers that run on hosts.""" + +import inspect +import os +import random +import signal + +import eventlet +import greenlet + +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder.openstack.common import importutils +from cinder import rpc +from cinder import utils +from cinder import version +from cinder import wsgi + + +LOG = logging.getLogger(__name__) + +service_opts = [ + cfg.IntOpt('report_interval', + default=10, + help='seconds between nodes reporting state to datastore'), + cfg.IntOpt('periodic_interval', + default=60, + help='seconds between running periodic tasks'), + cfg.IntOpt('periodic_fuzzy_delay', + default=60, + help='range of seconds to randomly delay when starting the' + ' periodic task scheduler to reduce stampeding.' + ' (Disable by setting to 0)'), + cfg.StrOpt('osapi_volume_listen', + default="0.0.0.0", + help='IP address for OpenStack Volume API to listen'), + cfg.IntOpt('osapi_volume_listen_port', + default=8776, + help='port for os volume api to listen'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(service_opts) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self._services = [] + + @staticmethod + def run_server(server): + """Start and wait for a server to finish. 
+ + :param service: Server to run and wait for. + :returns: None + + """ + server.start() + server.wait() + + def launch_server(self, server): + """Load and start the given server. + + :param server: The server you would like to start. + :returns: None + + """ + gt = eventlet.spawn(self.run_server, server) + self._services.append(gt) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + for service in self._services: + service.kill() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + def sigterm(sig, frame): + LOG.audit(_("SIGTERM received")) + # NOTE(jk0): Raise a ^C which is caught by the caller and cleanly + # shuts down the service. This does not yet handle eventlet + # threads. + raise KeyboardInterrupt + + signal.signal(signal.SIGTERM, sigterm) + + for service in self._services: + try: + service.wait() + except greenlet.GreenletExit: + pass + + +class Service(object): + """Service object for binaries running on hosts. + + A service takes a manager and enables rpc by listening to queues based + on topic. It also periodically runs tasks on the manager and reports + its state to the database services table.""" + + def __init__(self, host, binary, topic, manager, report_interval=None, + periodic_interval=None, periodic_fuzzy_delay=None, + *args, **kwargs): + self.host = host + self.binary = binary + self.topic = topic + self.manager_class_name = manager + manager_class = importutils.import_class(self.manager_class_name) + self.manager = manager_class(host=self.host, *args, **kwargs) + self.report_interval = report_interval + self.periodic_interval = periodic_interval + self.periodic_fuzzy_delay = periodic_fuzzy_delay + super(Service, self).__init__(*args, **kwargs) + self.saved_args, self.saved_kwargs = args, kwargs + self.timers = [] + + def start(self): + vcs_string = version.version_string_with_vcs() + LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'), + {'topic': self.topic, 'vcs_string': vcs_string}) + utils.cleanup_file_locks() + rpc.register_opts(FLAGS) + self.manager.init_host() + self.model_disconnected = False + ctxt = context.get_admin_context() + try: + service_ref = db.service_get_by_args(ctxt, + self.host, + self.binary) + self.service_id = service_ref['id'] + except exception.NotFound: + self._create_service_ref(ctxt) + + if 'cinder-compute' == self.binary: + self.manager.update_available_resource(ctxt) + + self.conn = rpc.create_connection(new=True) + LOG.debug(_("Creating Consumer connection for Service %s") % + self.topic) + + # Share this same connection for these Consumers + self.conn.create_consumer(self.topic, self, fanout=False) + + node_topic = '%s.%s' % (self.topic, self.host) + self.conn.create_consumer(node_topic, self, fanout=False) + + self.conn.create_consumer(self.topic, self, fanout=True) + + # Consume from all consumers in a thread + self.conn.consume_in_thread() + + if self.report_interval: + pulse = utils.LoopingCall(self.report_state) + pulse.start(interval=self.report_interval, + initial_delay=self.report_interval) + self.timers.append(pulse) + + if self.periodic_interval: + if self.periodic_fuzzy_delay: + initial_delay = random.randint(0, self.periodic_fuzzy_delay) + else: + initial_delay = None + + periodic = utils.LoopingCall(self.periodic_tasks) + periodic.start(interval=self.periodic_interval, + initial_delay=initial_delay) + self.timers.append(periodic) + + def _create_service_ref(self, context): + zone =
FLAGS.node_availability_zone + service_ref = db.service_create(context, + {'host': self.host, + 'binary': self.binary, + 'topic': self.topic, + 'report_count': 0, + 'availability_zone': zone}) + self.service_id = service_ref['id'] + + def __getattr__(self, key): + manager = self.__dict__.get('manager', None) + return getattr(manager, key) + + @classmethod + def create(cls, host=None, binary=None, topic=None, manager=None, + report_interval=None, periodic_interval=None, + periodic_fuzzy_delay=None): + """Instantiates class and passes back application object. + + :param host: defaults to FLAGS.host + :param binary: defaults to basename of executable + :param topic: defaults to bin_name - 'cinder-' part + :param manager: defaults to FLAGS.<topic>_manager + :param report_interval: defaults to FLAGS.report_interval + :param periodic_interval: defaults to FLAGS.periodic_interval + :param periodic_fuzzy_delay: defaults to FLAGS.periodic_fuzzy_delay + + """ + if not host: + host = FLAGS.host + if not binary: + binary = os.path.basename(inspect.stack()[-1][1]) + if not topic: + topic = binary.rpartition('cinder-')[2] + if not manager: + manager = FLAGS.get('%s_manager' % topic, None) + if report_interval is None: + report_interval = FLAGS.report_interval + if periodic_interval is None: + periodic_interval = FLAGS.periodic_interval + if periodic_fuzzy_delay is None: + periodic_fuzzy_delay = FLAGS.periodic_fuzzy_delay + service_obj = cls(host, binary, topic, manager, + report_interval=report_interval, + periodic_interval=periodic_interval, + periodic_fuzzy_delay=periodic_fuzzy_delay) + + return service_obj + + def kill(self): + """Destroy the service object in the datastore.""" + self.stop() + try: + db.service_destroy(context.get_admin_context(), self.service_id) + except exception.NotFound: + LOG.warn(_('Service killed that has no database entry')) + + def stop(self): + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass + for x in self.timers: + try: + x.stop() + except Exception: + pass + self.timers = [] + + def wait(self): + for x in self.timers: + try: + x.wait() + except Exception: + pass + + def periodic_tasks(self, raise_on_error=False): + """Tasks to be run at a periodic interval.""" + ctxt = context.get_admin_context() + self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error) + + def report_state(self): + """Update the state of this service in the datastore.""" + ctxt = context.get_admin_context() + zone = FLAGS.node_availability_zone + state_catalog = {} + try: + try: + service_ref = db.service_get(ctxt, self.service_id) + except exception.NotFound: + LOG.debug(_('The service database object disappeared, ' + 'Recreating it.')) + self._create_service_ref(ctxt) + service_ref = db.service_get(ctxt, self.service_id) + + state_catalog['report_count'] = service_ref['report_count'] + 1 + if zone != service_ref['availability_zone']: + state_catalog['availability_zone'] = zone + + db.service_update(ctxt, + self.service_id, state_catalog) + + # TODO(termie): make this pattern be more elegant.
+ if getattr(self, 'model_disconnected', False): + self.model_disconnected = False + LOG.error(_('Recovered model server connection!')) + + # TODO(vish): this should probably only catch connection errors + except Exception: # pylint: disable=W0702 + if not getattr(self, 'model_disconnected', False): + self.model_disconnected = True + LOG.exception(_('model server went away')) + + +class WSGIService(object): + """Provides ability to launch API from a 'paste' configuration.""" + + def __init__(self, name, loader=None): + """Initialize, but do not start the WSGI server. + + :param name: The name of the WSGI server given to the loader. + :param loader: Loads the WSGI application using the given name. + :returns: None + + """ + self.name = name + self.manager = self._get_manager() + self.loader = loader or wsgi.Loader() + self.app = self.loader.load_app(name) + self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0") + self.port = getattr(FLAGS, '%s_listen_port' % name, 0) + self.server = wsgi.Server(name, + self.app, + host=self.host, + port=self.port) + + def _get_manager(self): + """Initialize a Manager object appropriate for this service. + + Use the service name to look up a Manager subclass from the + configuration and initialize an instance. If no class name + is configured, just return None. + + :returns: a Manager instance, or None. + + """ + fl = '%s_manager' % self.name + if not fl in FLAGS: + return None + + manager_class_name = FLAGS.get(fl, None) + if not manager_class_name: + return None + + manager_class = importutils.import_class(manager_class_name) + return manager_class() + + def start(self): + """Start serving this service using loaded configuration. + + Also, retrieve updated port number in case '0' was passed in, which + indicates a random port should be used. + + :returns: None + + """ + utils.cleanup_file_locks() + rpc.register_opts(FLAGS) + if self.manager: + self.manager.init_host() + self.server.start() + self.port = self.server.port + + def stop(self): + """Stop serving this API. + + :returns: None + + """ + self.server.stop() + + def wait(self): + """Wait for the service to stop serving this API. + + :returns: None + + """ + self.server.wait() + + +# NOTE(vish): the global launcher is to maintain the existing +# functionality of calling service.serve + +# service.wait +_launcher = None + + +def serve(*servers): + global _launcher + if not _launcher: + _launcher = Launcher() + for server in servers: + _launcher.launch_server(server) + + +def wait(): + LOG.debug(_('Full set of FLAGS:')) + for flag in FLAGS: + flag_get = FLAGS.get(flag, None) + # hide flag contents from log if contains a password + # should use secret flag when switch over to openstack-common + if ("_password" in flag or "_key" in flag or + (flag == "sql_connection" and "mysql:" in flag_get)): + LOG.debug(_('%(flag)s : FLAG SET ') % locals()) + else: + LOG.debug('%(flag)s : %(flag_get)s' % locals()) + try: + _launcher.wait() + except KeyboardInterrupt: + _launcher.stop() + rpc.cleanup() diff --git a/cinder/test.py b/cinder/test.py new file mode 100644 index 00000000000..434a7c6b994 --- /dev/null +++ b/cinder/test.py @@ -0,0 +1,295 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Base classes for our unit tests. + +Allows overriding of flags for use of fakes, and some black magic for +inline callbacks. + +""" + +import functools +import os +import shutil +import uuid +import unittest + +import mox +import nose.plugins.skip +import stubout + +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder import utils +from cinder import service +from cinder import tests + + +test_opts = [ + cfg.StrOpt('sqlite_clean_db', + default='clean.sqlite', + help='File name of clean sqlite db'), + cfg.BoolOpt('fake_tests', + default=True, + help='should we use everything for testing'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(test_opts) + +LOG = logging.getLogger(__name__) + + +class skip_test(object): + """Decorator that skips a test.""" + # TODO(tr3buchet): remember forever what comstud did here + def __init__(self, msg): + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + raise nose.SkipTest(self.message) + return _skipper + + +class skip_if(object): + """Decorator that skips a test if condition is true.""" + def __init__(self, condition, msg): + self.condition = condition + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if self.condition: + raise nose.SkipTest(self.message) + func(*args, **kw) + return _skipper + + +class skip_unless(object): + """Decorator that skips a test if condition is not true.""" + def __init__(self, condition, msg): + self.condition = condition + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if not self.condition: + raise nose.SkipTest(self.message) + func(*args, **kw) + return _skipper + + +def skip_if_fake(func): + """Decorator that skips a test if running in fake mode.""" + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if FLAGS.fake_tests: + raise unittest.SkipTest('Test cannot be run in fake mode') + else: + return func(*args, **kw) + return _skipper + + +class TestingException(Exception): + pass + + +class TestCase(unittest.TestCase): + """Test case base class for all unit tests.""" + + def setUp(self): + """Run before each test method to initialize test environment.""" + super(TestCase, self).setUp() + # NOTE(vish): We need a better method for creating fixtures for tests + # now that we have some required db setup for the system + # to work properly. 
+ self.start = utils.utcnow() + tests.reset_db() + + # emulate some of the mox stuff, we can't use the metaclass + # because it screws with our generators + self.mox = mox.Mox() + self.stubs = stubout.StubOutForTesting() + self.injected = [] + self._services = [] + self._overridden_opts = [] + + def tearDown(self): + """Runs after each test method to tear down test environment.""" + try: + self.mox.UnsetStubs() + self.stubs.UnsetAll() + self.stubs.SmartUnsetAll() + self.mox.VerifyAll() + super(TestCase, self).tearDown() + finally: + # Reset any overridden flags + self.reset_flags() + + # Stop any timers + for x in self.injected: + try: + x.stop() + except AssertionError: + pass + + # Kill any services + for x in self._services: + try: + x.kill() + except Exception: + pass + + # Delete attributes that don't start with _ so they don't pin + # memory around unnecessarily for the duration of the test + # suite + for key in [k for k in self.__dict__.keys() if k[0] != '_']: + del self.__dict__[key] + + def flags(self, **kw): + """Override flag variables for a test.""" + for k, v in kw.iteritems(): + FLAGS.set_override(k, v) + self._overridden_opts.append(k) + + def reset_flags(self): + """Resets all flag variables for the test. + + Runs after each test. + + """ + for k in self._overridden_opts: + FLAGS.set_override(k, None) + self._overridden_opts = [] + + def start_service(self, name, host=None, **kwargs): + host = host and host or uuid.uuid4().hex + kwargs.setdefault('host', host) + kwargs.setdefault('binary', 'cinder-%s' % name) + svc = service.Service.create(**kwargs) + svc.start() + self._services.append(svc) + return svc + + # Useful assertions + def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): + """Assert two dicts are equivalent. + + This is a 'deep' match in the sense that it handles nested + dictionaries appropriately. + + NOTE: + + If you don't care (or don't know) a given value, you can specify + the string DONTCARE as the value. This will cause that dict-item + to be skipped. + + """ + def raise_assertion(msg): + d1str = str(d1) + d2str = str(d2) + base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s ' + 'd2: %(d2str)s' % locals()) + raise AssertionError(base_msg) + + d1keys = set(d1.keys()) + d2keys = set(d2.keys()) + if d1keys != d2keys: + d1only = d1keys - d2keys + d2only = d2keys - d1keys + raise_assertion('Keys in d1 and not d2: %(d1only)s. 
' + 'Keys in d2 and not d1: %(d2only)s' % locals()) + + for key in d1keys: + d1value = d1[key] + d2value = d2[key] + try: + error = abs(float(d1value) - float(d2value)) + within_tolerance = error <= tolerance + except (ValueError, TypeError): + # If both values aren't convertible to float, just ignore + # ValueError if arg is a str, TypeError if it's something else + # (like None) + within_tolerance = False + + if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): + self.assertDictMatch(d1value, d2value) + elif 'DONTCARE' in (d1value, d2value): + continue + elif approx_equal and within_tolerance: + continue + elif d1value != d2value: + raise_assertion("d1['%(key)s']=%(d1value)s != " + "d2['%(key)s']=%(d2value)s" % locals()) + + def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001): + """Assert a list of dicts are equivalent.""" + def raise_assertion(msg): + L1str = str(L1) + L2str = str(L2) + base_msg = ('List of dictionaries do not match: %(msg)s ' + 'L1: %(L1str)s L2: %(L2str)s' % locals()) + raise AssertionError(base_msg) + + L1count = len(L1) + L2count = len(L2) + if L1count != L2count: + raise_assertion('Length mismatch: len(L1)=%(L1count)d != ' + 'len(L2)=%(L2count)d' % locals()) + + for d1, d2 in zip(L1, L2): + self.assertDictMatch(d1, d2, approx_equal=approx_equal, + tolerance=tolerance) + + def assertSubDictMatch(self, sub_dict, super_dict): + """Assert a sub_dict is a subset of super_dict.""" + self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys()))) + for k, sub_value in sub_dict.items(): + super_value = super_dict[k] + if isinstance(sub_value, dict): + self.assertSubDictMatch(sub_value, super_value) + elif 'DONTCARE' in (sub_value, super_value): + continue + else: + self.assertEqual(sub_value, super_value) + + def assertIn(self, a, b, *args, **kwargs): + """Python < v2.7 compatibility. Assert 'a' in 'b'.""" + try: + f = super(TestCase, self).assertIn + except AttributeError: + self.assertTrue(a in b, *args, **kwargs) + else: + f(a, b, *args, **kwargs) + + def assertNotIn(self, a, b, *args, **kwargs): + """Python < v2.7 compatibility. Assert 'a' NOT in 'b'.""" + try: + f = super(TestCase, self).assertNotIn + except AttributeError: + self.assertFalse(a in b, *args, **kwargs) + else: + f(a, b, *args, **kwargs) diff --git a/cinder/testing/README.rst b/cinder/testing/README.rst new file mode 100644 index 00000000000..3d15e5cecd2 --- /dev/null +++ b/cinder/testing/README.rst @@ -0,0 +1,66 @@ +======================================= +OpenStack Cinder Testing Infrastructure +======================================= + +A note of clarification is in order, to help those who are new to testing in +OpenStack cinder: + +- actual unit tests are created in the "tests" directory; +- the "testing" directory is used to house the infrastructure needed to support + testing in OpenStack Cinder. + +This README file attempts to provide current and prospective contributors with +everything they need to know in order to start creating unit tests and +utilizing the convenience code provided in cinder.testing. + +Note: the content for the rest of this file will be added as the work items in +the following blueprint are completed: + https://blueprints.launchpad.net/cinder/+spec/consolidate-testing-infrastructure + + +Test Types: Unit vs. Functional vs.
Integration +----------------------------------------------- + +TBD + +Writing Unit Tests +------------------ + +TBD + +Using Fakes +~~~~~~~~~~~ + +TBD + +test.TestCase +------------- +The TestCase class from cinder.test (generally imported as test) will +automatically manage self.stubs using the stubout module and self.mox +using the mox module during the setUp step. Both are automatically +verified and cleaned up during the tearDown step. + +If using test.TestCase, calling the super class setUp is required, and +calling the super class tearDown last is required if tearDown is +overridden. + +Writing Functional Tests +------------------------ + +TBD + +Writing Integration Tests +------------------------- + +TBD + +Tests and assertRaises +---------------------- +When asserting that a test should raise an exception, test against the +most specific exception possible. An overly broad exception type (like +Exception) can mask errors in the unit test itself. + +Example:: + + self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid, + elevated, instance_uuid) diff --git a/cinder/testing/__init__.py b/cinder/testing/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cinder/testing/fake/__init__.py b/cinder/testing/fake/__init__.py new file mode 100644 index 00000000000..5cdad4717e5 --- /dev/null +++ b/cinder/testing/fake/__init__.py @@ -0,0 +1 @@ +import rabbit diff --git a/cinder/testing/runner.py b/cinder/testing/runner.py new file mode 100644 index 00000000000..77b7701ad6b --- /dev/null +++ b/cinder/testing/runner.py @@ -0,0 +1,372 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Colorizer Code is borrowed from Twisted: +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +"""Unittest runner for Cinder. + +To run all tests + python cinder/testing/runner.py + +To run a single test module: + python cinder/testing/runner.py test_compute + + or + + python cinder/testing/runner.py api.test_wsgi + +To run a single test: + python cinder/testing/runner.py + test_compute:ComputeTestCase.test_run_terminate + +""" + +import gettext +import heapq +import os +import unittest +import sys +import time + +import eventlet +from nose import config +from nose import core +from nose import result + +gettext.install('cinder', unicode=1) +reldir = os.path.join(os.path.dirname(__file__), '..', '..') +absdir = os.path.abspath(reldir) +sys.path.insert(0, absdir) + +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg + + +class _AnsiColorizer(object): + """ + A colorizer is an object that loosely wraps around a stream, allowing + callers to write text to the stream in a particular color. + + Colorizer classes must implement C{supported()} and C{write(text, color)}. + """ + _colors = dict(black=30, red=31, green=32, yellow=33, + blue=34, magenta=35, cyan=36, white=37) + + def __init__(self, stream): + self.stream = stream + + def supported(cls, stream=sys.stdout): + """ + A class method that returns True if the current platform supports + coloring terminal output using this method. Returns False otherwise. + """ + if not stream.isatty(): + return False # auto color only on TTYs + try: + import curses + except ImportError: + return False + else: + try: + try: + return curses.tigetnum("colors") > 2 + except curses.error: + curses.setupterm() + return curses.tigetnum("colors") > 2 + except Exception: + raise + # guess false in case of error + return False + supported = classmethod(supported) + + def write(self, text, color): + """ + Write the given text to the stream in the given color. + + @param text: Text to be written to the stream. + + @param color: A string label for a color. e.g. 'red', 'white'. + """ + color = self._colors[color] + self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) + + +class _Win32Colorizer(object): + """ + See _AnsiColorizer docstring. 
+ """ + def __init__(self, stream): + import win32console as win + red, green, blue, bold = (win.FOREGROUND_RED, win.FOREGROUND_GREEN, + win.FOREGROUND_BLUE, win.FOREGROUND_INTENSITY) + self.stream = stream + self.screenBuffer = win.GetStdHandle(win.STD_OUT_HANDLE) + self._colors = { + 'normal': red | green | blue, + 'red': red | bold, + 'green': green | bold, + 'blue': blue | bold, + 'yellow': red | green | bold, + 'magenta': red | blue | bold, + 'cyan': green | blue | bold, + 'white': red | green | blue | bold + } + + def supported(cls, stream=sys.stdout): + try: + import win32console + screenBuffer = win32console.GetStdHandle( + win32console.STD_OUT_HANDLE) + except ImportError: + return False + import pywintypes + try: + screenBuffer.SetConsoleTextAttribute( + win32console.FOREGROUND_RED | + win32console.FOREGROUND_GREEN | + win32console.FOREGROUND_BLUE) + except pywintypes.error: + return False + else: + return True + supported = classmethod(supported) + + def write(self, text, color): + color = self._colors[color] + self.screenBuffer.SetConsoleTextAttribute(color) + self.stream.write(text) + self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) + + +class _NullColorizer(object): + """ + See _AnsiColorizer docstring. + """ + def __init__(self, stream): + self.stream = stream + + def supported(cls, stream=sys.stdout): + return True + supported = classmethod(supported) + + def write(self, text, color): + self.stream.write(text) + + +def get_elapsed_time_color(elapsed_time): + if elapsed_time > 1.0: + return 'red' + elif elapsed_time > 0.25: + return 'yellow' + else: + return 'green' + + +class CinderTestResult(result.TextTestResult): + def __init__(self, *args, **kw): + self.show_elapsed = kw.pop('show_elapsed') + result.TextTestResult.__init__(self, *args, **kw) + self.num_slow_tests = 5 + self.slow_tests = [] # this is a fixed-sized heap + self._last_case = None + self.colorizer = None + # NOTE(vish): reset stdout for the terminal check + stdout = sys.stdout + sys.stdout = sys.__stdout__ + for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: + if colorizer.supported(): + self.colorizer = colorizer(self.stream) + break + sys.stdout = stdout + + # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate + # error results in it failing to be initialized later. Otherwise, + # _handleElapsedTime will fail, causing the wrong error message to + # be outputted. 
+ self.start_time = time.time() + + def getDescription(self, test): + return str(test) + + def _handleElapsedTime(self, test): + self.elapsed_time = time.time() - self.start_time + item = (self.elapsed_time, test) + # Record only the n-slowest tests using heap + if len(self.slow_tests) >= self.num_slow_tests: + heapq.heappushpop(self.slow_tests, item) + else: + heapq.heappush(self.slow_tests, item) + + def _writeElapsedTime(self, test): + color = get_elapsed_time_color(self.elapsed_time) + self.colorizer.write(" %.2f" % self.elapsed_time, color) + + def _writeResult(self, test, long_result, color, short_result, success): + if self.showAll: + self.colorizer.write(long_result, color) + if self.show_elapsed and success: + self._writeElapsedTime(test) + self.stream.writeln() + elif self.dots: + self.stream.write(short_result) + self.stream.flush() + + # NOTE(vish): copied from unittest with edit to add color + def addSuccess(self, test): + unittest.TestResult.addSuccess(self, test) + self._handleElapsedTime(test) + self._writeResult(test, 'OK', 'green', '.', True) + + # NOTE(vish): copied from unittest with edit to add color + def addFailure(self, test, err): + unittest.TestResult.addFailure(self, test, err) + self._handleElapsedTime(test) + self._writeResult(test, 'FAIL', 'red', 'F', False) + + # NOTE(vish): copied from nose with edit to add color + def addError(self, test, err): + """Overrides normal addError to add support for + errorClasses. If the exception is a registered class, the + error will be added to the list for that class, not errors. + """ + self._handleElapsedTime(test) + stream = getattr(self, 'stream', None) + ec, ev, tb = err + try: + exc_info = self._exc_info_to_string(err, test) + except TypeError: + # 2.3 compat + exc_info = self._exc_info_to_string(err) + for cls, (storage, label, isfail) in self.errorClasses.items(): + if result.isclass(ec) and issubclass(ec, cls): + if isfail: + test.passed = False + storage.append((test, exc_info)) + # Might get patched into a streamless result + if stream is not None: + if self.showAll: + message = [label] + detail = result._exception_detail(err[1]) + if detail: + message.append(detail) + stream.writeln(": ".join(message)) + elif self.dots: + stream.write(label[:1]) + return + self.errors.append((test, exc_info)) + test.passed = False + if stream is not None: + self._writeResult(test, 'ERROR', 'red', 'E', False) + + def startTest(self, test): + unittest.TestResult.startTest(self, test) + self.start_time = time.time() + current_case = test.test.__class__.__name__ + + if self.showAll: + if current_case != self._last_case: + self.stream.writeln(current_case) + self._last_case = current_case + + self.stream.write( + ' %s' % str(test.test._testMethodName).ljust(60)) + self.stream.flush() + + +class CinderTestRunner(core.TextTestRunner): + def __init__(self, *args, **kwargs): + self.show_elapsed = kwargs.pop('show_elapsed') + core.TextTestRunner.__init__(self, *args, **kwargs) + + def _makeResult(self): + return CinderTestResult(self.stream, + self.descriptions, + self.verbosity, + self.config, + show_elapsed=self.show_elapsed) + + def _writeSlowTests(self, result_): + # Pare out 'fast' tests + slow_tests = [item for item in result_.slow_tests + if get_elapsed_time_color(item[0]) != 'green'] + if slow_tests: + slow_total_time = sum(item[0] for item in slow_tests) + self.stream.writeln("Slowest %i tests took %.2f secs:" + % (len(slow_tests), slow_total_time)) + for elapsed_time, test in sorted(slow_tests, reverse=True): + time_str = "%.2f" 
% elapsed_time + self.stream.writeln(" %s %s" % (time_str.ljust(10), test)) + + def run(self, test): + result_ = core.TextTestRunner.run(self, test) + if self.show_elapsed: + self._writeSlowTests(result_) + return result_ + + +def run(): + # This is a fix to allow the --hide-elapsed flag while accepting + # arbitrary nosetest flags as well + argv = [x for x in sys.argv if x != '--hide-elapsed'] + hide_elapsed = argv != sys.argv + logging.setup() + + # If any argument looks like a test name but doesn't have "cinder.tests" in + # front of it, automatically add that so we don't have to type as much + for i, arg in enumerate(argv): + if arg.startswith('test_'): + argv[i] = 'cinder.tests.%s' % arg + + testdir = os.path.abspath(os.path.join("cinder", "tests")) + c = config.Config(stream=sys.stdout, + env=os.environ, + verbosity=3, + workingDir=testdir, + plugins=core.DefaultPluginManager()) + + runner = CinderTestRunner(stream=c.stream, + verbosity=c.verbosity, + config=c, + show_elapsed=not hide_elapsed) + sys.exit(not core.run(config=c, testRunner=runner, argv=argv)) + + +if __name__ == '__main__': + eventlet.monkey_patch() + run() diff --git a/cinder/tests/__init__.py b/cinder/tests/__init__.py new file mode 100644 index 00000000000..82532bf04af --- /dev/null +++ b/cinder/tests/__init__.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`cinder.tests` -- Cinder Unittests +===================================================== + +.. automodule:: cinder.tests + :platform: Unix +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. moduleauthor:: Andy Smith +""" + +# See http://code.google.com/p/python-nose/issues/detail?id=373 +# The code below enables nosetests to work with i18n _() blocks +import __builtin__ +setattr(__builtin__, '_', lambda x: x) +import os +import shutil + +from cinder.db.sqlalchemy.session import get_engine +from cinder import flags + +FLAGS = flags.FLAGS + +_DB = None + + +def reset_db(): + if FLAGS.sql_connection == "sqlite://": + engine = get_engine() + engine.dispose() + conn = engine.connect() + conn.connection.executescript(_DB) + else: + shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), + os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) + + +def setup(): + import mox # Fail fast if you don't have mox. 
Workaround for bug 810424 + + from cinder import rpc # Register rpc_backend before fake_flags sets it + FLAGS.register_opts(rpc.rpc_opts) + + from cinder.db import migration + from cinder.tests import fake_flags + + if FLAGS.sql_connection == "sqlite://": + if migration.db_version() > 1: + return + else: + testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) + if os.path.exists(testdb): + return + migration.db_sync() + + if FLAGS.sql_connection == "sqlite://": + global _DB + engine = get_engine() + conn = engine.connect() + _DB = "".join(line for line in conn.connection.iterdump()) + else: + cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) + shutil.copyfile(testdb, cleandb) diff --git a/cinder/tests/api/__init__.py b/cinder/tests/api/__init__.py new file mode 100644 index 00000000000..3be5ce944ce --- /dev/null +++ b/cinder/tests/api/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/api/openstack/__init__.py b/cinder/tests/api/openstack/__init__.py new file mode 100644 index 00000000000..3be5ce944ce --- /dev/null +++ b/cinder/tests/api/openstack/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/api/openstack/common.py b/cinder/tests/api/openstack/common.py new file mode 100644 index 00000000000..19515ca67e1 --- /dev/null +++ b/cinder/tests/api/openstack/common.py @@ -0,0 +1,58 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
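+# NOTE: illustrative usage sketch. webob_factory() below returns a closure
+# bound to a base URL, so individual tests only supply the path, the HTTP
+# method and an optional body (which is JSON-encoded). A hypothetical
+# caller would look like:
+#
+#     request_maker = webob_factory('http://localhost/v1')
+#     req = request_maker('/volumes', method='POST',
+#                         body={'volume': {'size': 1}})
+#     # req is a webob.Request with content_type 'application/json'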
+ +import json + +import webob + + +def webob_factory(url): + """Factory for removing duplicate webob code from tests""" + + base_url = url + + def web_request(url, method=None, body=None): + req = webob.Request.blank("%s%s" % (base_url, url)) + if method: + req.content_type = "application/json" + req.method = method + if body: + req.body = json.dumps(body) + return req + return web_request + + +def compare_links(actual, expected): + """Compare xml atom links.""" + + return compare_tree_to_dict(actual, expected, ('rel', 'href', 'type')) + + +def compare_media_types(actual, expected): + """Compare xml media types.""" + + return compare_tree_to_dict(actual, expected, ('base', 'type')) + + +def compare_tree_to_dict(actual, expected, keys): + """Compare parts of lxml.etree objects to dicts.""" + + for elem, data in zip(actual, expected): + for key in keys: + if elem.get(key) != data.get(key): + return False + return True diff --git a/cinder/tests/api/openstack/fakes.py b/cinder/tests/api/openstack/fakes.py new file mode 100644 index 00000000000..8d97fca89b2 --- /dev/null +++ b/cinder/tests/api/openstack/fakes.py @@ -0,0 +1,234 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
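+# NOTE: usage sketch, with assumed import paths: the stub_volume_* helpers
+# below are written to be patched over the volume API with stubout from a
+# test's setUp(), along the lines of:
+#
+#     self.stubs.Set(cinder.volume.api.API, 'get', fakes.stub_volume_get)
+#     self.stubs.Set(cinder.volume.api.API, 'delete',
+#                    fakes.stub_volume_delete)
+#
+# so controller tests see deterministic volume data without a database.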
+ +import datetime + +import routes +import webob +import webob.dec +import webob.request + +from cinder.api import auth as api_auth +from cinder.api import openstack as openstack_api +from cinder.api.openstack import auth +from cinder.api.openstack import urlmap +from cinder.api.openstack.compute import versions +from cinder.api.openstack import wsgi as os_wsgi +from cinder import context +from cinder.db.sqlalchemy import models +from cinder import exception as exc +from cinder import utils +from cinder import wsgi + + +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' +FAKE_UUIDS = {} + + +class Context(object): + pass + + +class FakeRouter(wsgi.Router): + def __init__(self, ext_mgr=None): + pass + + @webob.dec.wsgify + def __call__(self, req): + res = webob.Response() + res.status = '200' + res.headers['X-Test-Success'] = 'True' + return res + + +@webob.dec.wsgify +def fake_wsgi(self, req): + return self.application + + +def wsgi_app(inner_app_v2=None, fake_auth=True, fake_auth_context=None, + use_no_auth=False, ext_mgr=None): + if not inner_app_v2: + inner_app_v2 = compute.APIRouter(ext_mgr) + + if fake_auth: + if fake_auth_context is not None: + ctxt = fake_auth_context + else: + ctxt = context.RequestContext('fake', 'fake', auth_token=True) + api_v2 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt, + limits.RateLimitingMiddleware(inner_app_v2))) + elif use_no_auth: + api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware( + limits.RateLimitingMiddleware(inner_app_v2))) + else: + api_v2 = openstack_api.FaultWrapper(auth.AuthMiddleware( + limits.RateLimitingMiddleware(inner_app_v2))) + + mapper = urlmap.URLMap() + mapper['/v2'] = api_v2 + mapper['/v1.1'] = api_v2 + mapper['/'] = openstack_api.FaultWrapper(versions.Versions()) + return mapper + + +def stub_out_rate_limiting(stubs): + def fake_rate_init(self, app): + # super(limits.RateLimitingMiddleware, self).__init__(app) + self.application = app + + # FIXME(ja): unsure about limits in volumes + # stubs.Set(cinder.api.openstack.compute.limits.RateLimitingMiddleware, + # '__init__', fake_rate_init) + + # stubs.Set(cinder.api.openstack.compute.limits.RateLimitingMiddleware, + # '__call__', fake_wsgi) + + +class FakeToken(object): + id_count = 0 + + def __getitem__(self, key): + return getattr(self, key) + + def __init__(self, **kwargs): + FakeToken.id_count += 1 + self.id = FakeToken.id_count + for k, v in kwargs.iteritems(): + setattr(self, k, v) + + +class FakeRequestContext(context.RequestContext): + def __init__(self, *args, **kwargs): + kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token') + return super(FakeRequestContext, self).__init__(*args, **kwargs) + + +class HTTPRequest(webob.Request): + + @classmethod + def blank(cls, *args, **kwargs): + kwargs['base_url'] = 'http://localhost/v2' + use_admin_context = kwargs.pop('use_admin_context', False) + out = webob.Request.blank(*args, **kwargs) + out.environ['cinder.context'] = FakeRequestContext('fake_user', 'fake', + is_admin=use_admin_context) + return out + + +class TestRouter(wsgi.Router): + def __init__(self, controller): + mapper = routes.Mapper() + mapper.resource("test", "tests", + controller=os_wsgi.Resource(controller)) + super(TestRouter, self).__init__(mapper) + + +class FakeAuthDatabase(object): + data = {} + + @staticmethod + def auth_token_get(context, token_hash): + return FakeAuthDatabase.data.get(token_hash, None) + + @staticmethod + def auth_token_create(context, token): + fake_token = FakeToken(created_at=utils.utcnow(), **token) + 
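+        # Each fake token is indexed twice: by token_hash for
+        # auth_token_get() and under 'id_<n>' for auth_token_destroy(),
+        # mirroring the two lookups the real auth db api performs.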
FakeAuthDatabase.data[fake_token.token_hash] = fake_token + FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token + return fake_token + + @staticmethod + def auth_token_destroy(context, token_id): + token = FakeAuthDatabase.data.get('id_%i' % token_id) + if token and token.token_hash in FakeAuthDatabase.data: + del FakeAuthDatabase.data[token.token_hash] + del FakeAuthDatabase.data['id_%i' % token_id] + + +class FakeRateLimiter(object): + def __init__(self, application): + self.application = application + + @webob.dec.wsgify + def __call__(self, req): + return self.application + + +def get_fake_uuid(token=0): + if not token in FAKE_UUIDS: + FAKE_UUIDS[token] = str(utils.gen_uuid()) + return FAKE_UUIDS[token] + + +def stub_volume(id, **kwargs): + volume = { + 'id': id, + 'user_id': 'fakeuser', + 'project_id': 'fakeproject', + 'host': 'fakehost', + 'size': 1, + 'availability_zone': 'fakeaz', + 'instance': {'uuid': 'fakeuuid'}, + 'mountpoint': '/', + 'status': 'fakestatus', + 'attach_status': 'attached', + 'name': 'vol name', + 'display_name': 'displayname', + 'display_description': 'displaydesc', + 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), + 'snapshot_id': None, + 'volume_type_id': 'fakevoltype', + 'volume_metadata': [], + 'volume_type': {'name': 'vol_type_name'}} + + volume.update(kwargs) + return volume + + +def stub_volume_create(self, context, size, name, description, snapshot, + **param): + vol = stub_volume(1) + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + try: + vol['snapshot_id'] = snapshot['id'] + except (KeyError, TypeError): + vol['snapshot_id'] = None + vol['availability_zone'] = param.get('availability_zone', 'fakeaz') + return vol + + +def stub_volume_update(self, context, *args, **param): + pass + + +def stub_volume_delete(self, context, *args, **param): + pass + + +def stub_volume_get(self, context, volume_id): + return stub_volume(volume_id) + + +def stub_volume_get_notfound(self, context, volume_id): + raise exc.NotFound + + +def stub_volume_get_all(self, context, search_opts=None): + return [stub_volume_get(self, context, 1)] diff --git a/cinder/tests/api/openstack/test_common.py b/cinder/tests/api/openstack/test_common.py new file mode 100644 index 00000000000..22ccbd77c93 --- /dev/null +++ b/cinder/tests/api/openstack/test_common.py @@ -0,0 +1,526 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test suites for 'common' code used throughout the OpenStack HTTP API. 
+""" + +from lxml import etree +import webob +import webob.exc +import xml.dom.minidom as minidom + +from cinder import exception +from cinder import test +from cinder.api.openstack import common +from cinder.api.openstack import xmlutil + + +NS = "{http://docs.openstack.org/compute/api/v1.1}" +ATOMNS = "{http://www.w3.org/2005/Atom}" + + +class LimiterTest(test.TestCase): + """ + Unit tests for the `cinder.api.openstack.common.limited` method which takes + in a list of items and, depending on the 'offset' and 'limit' GET params, + returns a subset or complete set of the given items. + """ + + def setUp(self): + """ Run before each test. """ + super(LimiterTest, self).setUp() + self.tiny = range(1) + self.small = range(10) + self.medium = range(1000) + self.large = range(10000) + + def test_limiter_offset_zero(self): + """ Test offset key works with 0. """ + req = webob.Request.blank('/?offset=0') + self.assertEqual(common.limited(self.tiny, req), self.tiny) + self.assertEqual(common.limited(self.small, req), self.small) + self.assertEqual(common.limited(self.medium, req), self.medium) + self.assertEqual(common.limited(self.large, req), self.large[:1000]) + + def test_limiter_offset_medium(self): + """ Test offset key works with a medium sized number. """ + req = webob.Request.blank('/?offset=10') + self.assertEqual(common.limited(self.tiny, req), []) + self.assertEqual(common.limited(self.small, req), self.small[10:]) + self.assertEqual(common.limited(self.medium, req), self.medium[10:]) + self.assertEqual(common.limited(self.large, req), self.large[10:1010]) + + def test_limiter_offset_over_max(self): + """ Test offset key works with a number over 1000 (max_limit). """ + req = webob.Request.blank('/?offset=1001') + self.assertEqual(common.limited(self.tiny, req), []) + self.assertEqual(common.limited(self.small, req), []) + self.assertEqual(common.limited(self.medium, req), []) + self.assertEqual( + common.limited(self.large, req), self.large[1001:2001]) + + def test_limiter_offset_blank(self): + """ Test offset key works with a blank offset. """ + req = webob.Request.blank('/?offset=') + self.assertRaises( + webob.exc.HTTPBadRequest, common.limited, self.tiny, req) + + def test_limiter_offset_bad(self): + """ Test offset key works with a BAD offset. """ + req = webob.Request.blank(u'/?offset=\u0020aa') + self.assertRaises( + webob.exc.HTTPBadRequest, common.limited, self.tiny, req) + + def test_limiter_nothing(self): + """ Test request with no offset or limit """ + req = webob.Request.blank('/') + self.assertEqual(common.limited(self.tiny, req), self.tiny) + self.assertEqual(common.limited(self.small, req), self.small) + self.assertEqual(common.limited(self.medium, req), self.medium) + self.assertEqual(common.limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_zero(self): + """ Test limit of zero. """ + req = webob.Request.blank('/?limit=0') + self.assertEqual(common.limited(self.tiny, req), self.tiny) + self.assertEqual(common.limited(self.small, req), self.small) + self.assertEqual(common.limited(self.medium, req), self.medium) + self.assertEqual(common.limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_medium(self): + """ Test limit of 10. 
""" + req = webob.Request.blank('/?limit=10') + self.assertEqual(common.limited(self.tiny, req), self.tiny) + self.assertEqual(common.limited(self.small, req), self.small) + self.assertEqual(common.limited(self.medium, req), self.medium[:10]) + self.assertEqual(common.limited(self.large, req), self.large[:10]) + + def test_limiter_limit_over_max(self): + """ Test limit of 3000. """ + req = webob.Request.blank('/?limit=3000') + self.assertEqual(common.limited(self.tiny, req), self.tiny) + self.assertEqual(common.limited(self.small, req), self.small) + self.assertEqual(common.limited(self.medium, req), self.medium) + self.assertEqual(common.limited(self.large, req), self.large[:1000]) + + def test_limiter_limit_and_offset(self): + """ Test request with both limit and offset. """ + items = range(2000) + req = webob.Request.blank('/?offset=1&limit=3') + self.assertEqual(common.limited(items, req), items[1:4]) + req = webob.Request.blank('/?offset=3&limit=0') + self.assertEqual(common.limited(items, req), items[3:1003]) + req = webob.Request.blank('/?offset=3&limit=1500') + self.assertEqual(common.limited(items, req), items[3:1003]) + req = webob.Request.blank('/?offset=3000&limit=10') + self.assertEqual(common.limited(items, req), []) + + def test_limiter_custom_max_limit(self): + """ Test a max_limit other than 1000. """ + items = range(2000) + req = webob.Request.blank('/?offset=1&limit=3') + self.assertEqual( + common.limited(items, req, max_limit=2000), items[1:4]) + req = webob.Request.blank('/?offset=3&limit=0') + self.assertEqual( + common.limited(items, req, max_limit=2000), items[3:]) + req = webob.Request.blank('/?offset=3&limit=2500') + self.assertEqual( + common.limited(items, req, max_limit=2000), items[3:]) + req = webob.Request.blank('/?offset=3000&limit=10') + self.assertEqual(common.limited(items, req, max_limit=2000), []) + + def test_limiter_negative_limit(self): + """ Test a negative limit. """ + req = webob.Request.blank('/?limit=-3000') + self.assertRaises( + webob.exc.HTTPBadRequest, common.limited, self.tiny, req) + + def test_limiter_negative_offset(self): + """ Test a negative offset. """ + req = webob.Request.blank('/?offset=-30') + self.assertRaises( + webob.exc.HTTPBadRequest, common.limited, self.tiny, req) + + +class PaginationParamsTest(test.TestCase): + """ + Unit tests for the `cinder.api.openstack.common.get_pagination_params` + method which takes in a request object and returns 'marker' and 'limit' + GET params. + """ + + def test_no_params(self): + """ Test no params. """ + req = webob.Request.blank('/') + self.assertEqual(common.get_pagination_params(req), {}) + + def test_valid_marker(self): + """ Test valid marker param. """ + req = webob.Request.blank( + '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2') + self.assertEqual(common.get_pagination_params(req), + {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'}) + + def test_valid_limit(self): + """ Test valid limit param. """ + req = webob.Request.blank('/?limit=10') + self.assertEqual(common.get_pagination_params(req), {'limit': 10}) + + def test_invalid_limit(self): + """ Test invalid limit param. """ + req = webob.Request.blank('/?limit=-2') + self.assertRaises( + webob.exc.HTTPBadRequest, common.get_pagination_params, req) + + def test_valid_limit_and_marker(self): + """ Test valid limit and marker parameters. 
""" + marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' + req = webob.Request.blank('/?limit=20&marker=%s' % marker) + self.assertEqual(common.get_pagination_params(req), + {'marker': marker, 'limit': 20}) + + +class MiscFunctionsTest(test.TestCase): + + def test_remove_major_version_from_href(self): + fixture = 'http://www.testsite.com/v1/images' + expected = 'http://www.testsite.com/images' + actual = common.remove_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_remove_version_from_href(self): + fixture = 'http://www.testsite.com/v1.1/images' + expected = 'http://www.testsite.com/images' + actual = common.remove_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_remove_version_from_href_2(self): + fixture = 'http://www.testsite.com/v1.1/' + expected = 'http://www.testsite.com/' + actual = common.remove_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_remove_version_from_href_3(self): + fixture = 'http://www.testsite.com/v10.10' + expected = 'http://www.testsite.com' + actual = common.remove_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_remove_version_from_href_4(self): + fixture = 'http://www.testsite.com/v1.1/images/v10.5' + expected = 'http://www.testsite.com/images/v10.5' + actual = common.remove_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_remove_version_from_href_bad_request(self): + fixture = 'http://www.testsite.com/1.1/images' + self.assertRaises(ValueError, + common.remove_version_from_href, + fixture) + + def test_remove_version_from_href_bad_request_2(self): + fixture = 'http://www.testsite.com/v/images' + self.assertRaises(ValueError, + common.remove_version_from_href, + fixture) + + def test_remove_version_from_href_bad_request_3(self): + fixture = 'http://www.testsite.com/v1.1images' + self.assertRaises(ValueError, + common.remove_version_from_href, + fixture) + + def test_get_id_from_href_with_int_url(self): + fixture = 'http://www.testsite.com/dir/45' + actual = common.get_id_from_href(fixture) + expected = '45' + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_int(self): + fixture = '45' + actual = common.get_id_from_href(fixture) + expected = '45' + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_int_url_query(self): + fixture = 'http://www.testsite.com/dir/45?asdf=jkl' + actual = common.get_id_from_href(fixture) + expected = '45' + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_uuid_url(self): + fixture = 'http://www.testsite.com/dir/abc123' + actual = common.get_id_from_href(fixture) + expected = "abc123" + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_uuid_url_query(self): + fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl' + actual = common.get_id_from_href(fixture) + expected = "abc123" + self.assertEqual(actual, expected) + + def test_get_id_from_href_with_uuid(self): + fixture = 'abc123' + actual = common.get_id_from_href(fixture) + expected = 'abc123' + self.assertEqual(actual, expected) + + def test_get_version_from_href(self): + fixture = 'http://www.testsite.com/v1.1/images' + expected = '1.1' + actual = common.get_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_get_version_from_href_2(self): + fixture = 'http://www.testsite.com/v1.1' + expected = '1.1' + actual = common.get_version_from_href(fixture) + self.assertEqual(actual, expected) + + def test_get_version_from_href_default(self): + 
+        fixture = 'http://www.testsite.com/images'
+        expected = '2'
+        actual = common.get_version_from_href(fixture)
+        self.assertEqual(actual, expected)
+
+    def test_raise_http_conflict_for_instance_invalid_state(self):
+        # Correct args
+        exc = exception.InstanceInvalidState(attr='fake_attr',
+                state='fake_state', method='fake_method')
+        try:
+            common.raise_http_conflict_for_instance_invalid_state(exc,
+                    'meow')
+        except Exception, e:
+            self.assertTrue(isinstance(e, webob.exc.HTTPConflict))
+            msg = str(e)
+            self.assertEqual(msg,
+                "Cannot 'meow' while instance is in fake_attr fake_state")
+        else:
+            self.fail("webob.exc.HTTPConflict was not raised")
+
+        # Incorrect args
+        exc = exception.InstanceInvalidState()
+        try:
+            common.raise_http_conflict_for_instance_invalid_state(exc,
+                    'meow')
+        except Exception, e:
+            self.assertTrue(isinstance(e, webob.exc.HTTPConflict))
+            msg = str(e)
+            self.assertEqual(msg,
+                "Instance is in an invalid state for 'meow'")
+        else:
+            self.fail("webob.exc.HTTPConflict was not raised")
+
+
+class MetadataXMLDeserializationTest(test.TestCase):
+
+    deserializer = common.MetadataXMLDeserializer()
+
+    def test_create(self):
+        request_body = """
+        <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+            <meta key='123'>asdf</meta>
+            <meta key='567'>jkl;</meta>
+        </metadata>"""
+        output = self.deserializer.deserialize(request_body, 'create')
+        expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
+        self.assertEquals(output, expected)
+
+    def test_create_empty(self):
+        request_body = """
+        <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+        output = self.deserializer.deserialize(request_body, 'create')
+        expected = {"body": {"metadata": {}}}
+        self.assertEquals(output, expected)
+
+    def test_update_all(self):
+        request_body = """
+        <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+            <meta key='123'>asdf</meta>
+            <meta key='567'>jkl;</meta>
+        </metadata>"""
+        output = self.deserializer.deserialize(request_body, 'update_all')
+        expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
+        self.assertEquals(output, expected)
+
+    def test_update(self):
+        request_body = """
+        <meta xmlns="http://docs.openstack.org/compute/api/v1.1"
+              key='123'>asdf</meta>"""
+        output = self.deserializer.deserialize(request_body, 'update')
+        expected = {"body": {"meta": {"123": "asdf"}}}
+        self.assertEquals(output, expected)
+
+
+class MetadataXMLSerializationTest(test.TestCase):
+
+    def test_xml_declaration(self):
+        serializer = common.MetadataTemplate()
+        fixture = {
+            'metadata': {
+                'one': 'two',
+                'three': 'four',
+            },
+        }
+
+        output = serializer.serialize(fixture)
+        print output
+        has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+        self.assertTrue(has_dec)
+
+    def test_index(self):
+        serializer = common.MetadataTemplate()
+        fixture = {
+            'metadata': {
+                'one': 'two',
+                'three': 'four',
+            },
+        }
+        output = serializer.serialize(fixture)
+        print output
+        root = etree.XML(output)
+        xmlutil.validate_schema(root, 'metadata')
+        metadata_dict = fixture['metadata']
+        metadata_elems = root.findall('{0}meta'.format(NS))
+        self.assertEqual(len(metadata_elems), 2)
+        for i, metadata_elem in enumerate(metadata_elems):
+            (meta_key, meta_value) = metadata_dict.items()[i]
+            self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+            self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+    def test_index_null(self):
+        serializer = common.MetadataTemplate()
+        fixture = {
+            'metadata': {
+                None: None,
+            },
+        }
+        output = serializer.serialize(fixture)
+        print output
+        root = etree.XML(output)
+        xmlutil.validate_schema(root, 'metadata')
+        metadata_dict = fixture['metadata']
+        metadata_elems = root.findall('{0}meta'.format(NS))
+        self.assertEqual(len(metadata_elems), 1)
+        for i, metadata_elem in enumerate(metadata_elems):
+            (meta_key, meta_value) = metadata_dict.items()[i]
+            self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+            self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+    def test_index_unicode(self):
+        serializer = common.MetadataTemplate()
+        fixture = {
+            'metadata': {
+                u'three': u'Jos\xe9',
+            },
+        }
+        output = serializer.serialize(fixture)
+        print output
+        root = etree.XML(output)
+        xmlutil.validate_schema(root, 'metadata')
+        metadata_dict = fixture['metadata']
+        metadata_elems = root.findall('{0}meta'.format(NS))
+        self.assertEqual(len(metadata_elems), 1)
+        for i, metadata_elem in enumerate(metadata_elems):
+            (meta_key, meta_value) = metadata_dict.items()[i]
+            self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+            self.assertEqual(metadata_elem.text.strip(), meta_value)
+
+    def test_show(self):
+        serializer = common.MetaItemTemplate()
+        fixture = {
+            'meta': {
+                'one': 'two',
+            },
+        }
+        output = serializer.serialize(fixture)
+        print output
+        root = etree.XML(output)
+        meta_dict = fixture['meta']
+        (meta_key, meta_value) = meta_dict.items()[0]
+        self.assertEqual(str(root.get('key')), str(meta_key))
+        self.assertEqual(root.text.strip(), meta_value)
+
+    def test_update_all(self):
+        serializer = common.MetadataTemplate()
+        fixture = {
+            'metadata': {
+                'key6': 'value6',
+                'key4': 'value4',
+            },
+        }
+        output = serializer.serialize(fixture)
+        print output
+        root = etree.XML(output)
+        xmlutil.validate_schema(root, 'metadata')
+        metadata_dict = fixture['metadata']
+        metadata_elems = root.findall('{0}meta'.format(NS))
+        self.assertEqual(len(metadata_elems), 2)
+        for i, metadata_elem in enumerate(metadata_elems):
+            (meta_key, meta_value) = metadata_dict.items()[i]
+            self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+            self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+    def test_update_item(self):
+        serializer = common.MetaItemTemplate()
+        fixture = {
+            'meta': {
+                'one': 'two',
+            },
+        }
+        output = serializer.serialize(fixture)
+        print output
+        root = etree.XML(output)
+        meta_dict = fixture['meta']
+        (meta_key, meta_value) = meta_dict.items()[0]
+        self.assertEqual(str(root.get('key')), str(meta_key))
+        self.assertEqual(root.text.strip(), meta_value)
+
+    def test_create(self):
+        serializer = common.MetadataTemplate()
+        fixture = {
+            'metadata': {
+                'key9': 'value9',
+                'key2': 'value2',
+                'key1': 'value1',
+            },
+        }
+        output = serializer.serialize(fixture)
+        print output
+        root = etree.XML(output)
+        xmlutil.validate_schema(root, 'metadata')
+        metadata_dict = fixture['metadata']
+        metadata_elems = root.findall('{0}meta'.format(NS))
+        self.assertEqual(len(metadata_elems), 3)
+        for i, metadata_elem in enumerate(metadata_elems):
+            (meta_key, meta_value) = metadata_dict.items()[i]
+            self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+            self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+        actual = minidom.parseString(output.replace("  ", ""))
+
+        expected = minidom.parseString("""
+            <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+                <meta key="key2">value2</meta>
+                <meta key="key9">value9</meta>
+                <meta key="key1">value1</meta>
+            </metadata>
+        """.replace("  ", "").replace("\n", ""))
+
+        self.assertEqual(expected.toxml(), actual.toxml())
diff --git a/cinder/tests/api/openstack/test_faults.py b/cinder/tests/api/openstack/test_faults.py
new file mode 100644
index 00000000000..9d85a14f499
--- /dev/null
+++ b/cinder/tests/api/openstack/test_faults.py
@@ -0,0 +1,208 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+from xml.dom import minidom
+
+import webob
+import webob.dec
+import webob.exc
+
+from cinder import test
+from cinder.api.openstack import common
+from cinder.api.openstack import wsgi
+
+
+class TestFaults(test.TestCase):
+    """Tests covering `cinder.api.openstack.faults:Fault` class."""
+
+    def _prepare_xml(self, xml_string):
+        """Remove characters from string which hinder XML equality testing."""
+        xml_string = xml_string.replace("  ", "")
+        xml_string = xml_string.replace("\n", "")
+        xml_string = xml_string.replace("\t", "")
+        return xml_string
+
+    def test_400_fault_json(self):
+        """Test fault serialized to JSON via file-extension and/or header."""
+        requests = [
+            webob.Request.blank('/.json'),
+            webob.Request.blank('/', headers={"Accept": "application/json"}),
+        ]
+
+        for request in requests:
+            fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
+            response = request.get_response(fault)
+
+            expected = {
+                "badRequest": {
+                    "message": "scram",
+                    "code": 400,
+                },
+            }
+            actual = json.loads(response.body)
+
+            self.assertEqual(response.content_type, "application/json")
+            self.assertEqual(expected, actual)
+
+    def test_413_fault_json(self):
+        """Test fault serialized to JSON via file-extension and/or header."""
+        requests = [
+            webob.Request.blank('/.json'),
+            webob.Request.blank('/', headers={"Accept": "application/json"}),
+        ]
+
+        for request in requests:
+            exc = webob.exc.HTTPRequestEntityTooLarge
+            fault = wsgi.Fault(exc(explanation='sorry',
+                                   headers={'Retry-After': 4}))
+            response = request.get_response(fault)
+
+            expected = {
+                "overLimit": {
+                    "message": "sorry",
+                    "code": 413,
+                    "retryAfter": 4,
+                },
+            }
+            actual = json.loads(response.body)
+
+            self.assertEqual(response.content_type, "application/json")
+            self.assertEqual(expected, actual)
+
+    def test_raise(self):
+        """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
+        @webob.dec.wsgify
+        def raiser(req):
+            raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
+
+        req = webob.Request.blank('/.xml')
+        resp = req.get_response(raiser)
+        self.assertEqual(resp.content_type, "application/xml")
+        self.assertEqual(resp.status_int, 404)
+        self.assertTrue('whut?' in resp.body)
+
+    def test_raise_403(self):
+        """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
+        @webob.dec.wsgify
+        def raiser(req):
+            raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
+
+        req = webob.Request.blank('/.xml')
+        resp = req.get_response(raiser)
+        self.assertEqual(resp.content_type, "application/xml")
+        self.assertEqual(resp.status_int, 403)
+        self.assertTrue('resizeNotAllowed' not in resp.body)
+        self.assertTrue('forbidden' in resp.body)
+
+    def test_fault_has_status_int(self):
+        """Ensure the status_int is set correctly on faults"""
+        fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
+        self.assertEqual(fault.status_int, 400)
+
+    def test_xml_serializer(self):
+        """Ensure that a v1.1 request responds with a v1 xmlns"""
+        request = webob.Request.blank('/v1',
+                                      headers={"Accept": "application/xml"})
+
+        fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
+        response = request.get_response(fault)
+
+        self.assertTrue(common.XML_NS_V1 in response.body)
+        self.assertEqual(response.content_type, "application/xml")
+        self.assertEqual(response.status_int, 400)
+
+
+class FaultsXMLSerializationTestV11(test.TestCase):
+    """Tests covering `cinder.api.openstack.faults:Fault` class."""
+
+    def _prepare_xml(self, xml_string):
+        xml_string = xml_string.replace("  ", "")
+        xml_string = xml_string.replace("\n", "")
+        xml_string = xml_string.replace("\t", "")
+        return xml_string
+
+    def test_400_fault(self):
+        metadata = {'attributes': {"badRequest": 'code'}}
+        serializer = wsgi.XMLDictSerializer(metadata=metadata,
+                                            xmlns=common.XML_NS_V1)
+
+        fixture = {
+            "badRequest": {
+                "message": "scram",
+                "code": 400,
+            },
+        }
+
+        output = serializer.serialize(fixture)
+        actual = minidom.parseString(self._prepare_xml(output))
+
+        expected = minidom.parseString(self._prepare_xml("""
+                <badRequest code="400" xmlns="%s">
+                    <message>scram</message>
+                </badRequest>
+            """) % common.XML_NS_V1)
+
+        self.assertEqual(expected.toxml(), actual.toxml())
+
+    def test_413_fault(self):
+        metadata = {'attributes': {"overLimit": 'code'}}
+        serializer = wsgi.XMLDictSerializer(metadata=metadata,
+                                            xmlns=common.XML_NS_V1)
+
+        fixture = {
+            "overLimit": {
+                "message": "sorry",
+                "code": 413,
+                "retryAfter": 4,
+            },
+        }
+
+        output = serializer.serialize(fixture)
+        actual = minidom.parseString(self._prepare_xml(output))
+
+        expected = minidom.parseString(self._prepare_xml("""
+                <overLimit code="413" xmlns="%s">
+                    <message>sorry</message>
+                    <retryAfter>4</retryAfter>
+                </overLimit>
+            """) % common.XML_NS_V1)
+
+        self.assertEqual(expected.toxml(), actual.toxml())
+
+    def test_404_fault(self):
+        metadata = {'attributes': {"itemNotFound": 'code'}}
+        serializer = wsgi.XMLDictSerializer(metadata=metadata,
+                                            xmlns=common.XML_NS_V1)
+
+        fixture = {
+            "itemNotFound": {
+                "message": "sorry",
+                "code": 404,
+            },
+        }
+
+        output = serializer.serialize(fixture)
+        actual = minidom.parseString(self._prepare_xml(output))
+
+        expected = minidom.parseString(self._prepare_xml("""
+                <itemNotFound code="404" xmlns="%s">
+                    <message>sorry</message>
+                </itemNotFound>
+            """) % common.XML_NS_V1)
+
+        self.assertEqual(expected.toxml(), actual.toxml())
diff --git a/cinder/tests/api/openstack/test_wsgi.py b/cinder/tests/api/openstack/test_wsgi.py
new file mode 100644
index 00000000000..f9a4a1c92df
--- /dev/null
+++ b/cinder/tests/api/openstack/test_wsgi.py
@@ -0,0 +1,833 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+import inspect
+import json
+import webob
+
+from cinder import exception
+from cinder import test
+from cinder import utils
+from cinder.api.openstack import wsgi
+from cinder.tests.api.openstack import fakes
+import cinder.context
+
+
+class RequestTest(test.TestCase):
+    def test_content_type_missing(self):
+        request = wsgi.Request.blank('/tests/123', method='POST')
+        request.body = ""
+        self.assertEqual(None, request.get_content_type())
+
+    def test_content_type_unsupported(self):
+        request = wsgi.Request.blank('/tests/123', method='POST')
+        request.headers["Content-Type"] = "text/html"
+        request.body = "asdf"
+        self.assertRaises(exception.InvalidContentType,
+                          request.get_content_type)
+
+    def test_content_type_with_charset(self):
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Content-Type"] = "application/json; charset=UTF-8"
+        result = request.get_content_type()
+        self.assertEqual(result, "application/json")
+
+    def test_content_type_from_accept(self):
+        for content_type in ('application/xml',
+                             'application/vnd.openstack.volume+xml',
+                             'application/json',
+                             'application/vnd.openstack.volume+json'):
+            request = wsgi.Request.blank('/tests/123')
+            request.headers["Accept"] = content_type
+            result = request.best_match_content_type()
+            self.assertEqual(result, content_type)
+
+    def test_content_type_from_accept_best(self):
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = "application/xml, application/json"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = ("application/json; q=0.3, "
+                                     "application/xml; q=0.9")
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+    def test_content_type_from_query_extension(self):
+        request = wsgi.Request.blank('/tests/123.xml')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+        request = wsgi.Request.blank('/tests/123.json')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+        request = wsgi.Request.blank('/tests/123.invalid')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+    def test_content_type_accept_and_query_extension(self):
+        request = wsgi.Request.blank('/tests/123.xml')
+        request.headers["Accept"] = "application/json"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+    def test_content_type_accept_default(self):
+        request = wsgi.Request.blank('/tests/123.unsupported')
+        request.headers["Accept"] = "application/unsupported1"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+
+class ActionDispatcherTest(test.TestCase):
+    def test_dispatch(self):
+        serializer = wsgi.ActionDispatcher()
+        serializer.create = lambda x: 'pants'
+        self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
+
+    def test_dispatch_action_None(self):
+        serializer = wsgi.ActionDispatcher()
+        serializer.create = lambda x: 'pants'
+        serializer.default = lambda x: 'trousers'
+        self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
+
+    def test_dispatch_default(self):
+        serializer = wsgi.ActionDispatcher()
+        serializer.create = lambda x: 'pants'
+        serializer.default = lambda x: 'trousers'
+        self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
+
+
+class DictSerializerTest(test.TestCase):
+    def test_dispatch_default(self):
+        serializer = wsgi.DictSerializer()
+        self.assertEqual(serializer.serialize({}, 'update'), '')
+
+
+class XMLDictSerializerTest(test.TestCase):
+    def test_xml(self):
+        input_dict = dict(servers=dict(a=(2, 3)))
+        expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
+        serializer = wsgi.XMLDictSerializer(xmlns="asdf")
+        result = serializer.serialize(input_dict)
+        result = result.replace('\n', '').replace(' ', '')
+        self.assertEqual(result, expected_xml)
+
+
+class JSONDictSerializerTest(test.TestCase):
+    def test_json(self):
+        input_dict = dict(servers=dict(a=(2, 3)))
+        expected_json = '{"servers":{"a":[2,3]}}'
+        serializer = wsgi.JSONDictSerializer()
+        result = serializer.serialize(input_dict)
+        result = result.replace('\n', '').replace(' ', '')
+        self.assertEqual(result, expected_json)
+
+
+class TextDeserializerTest(test.TestCase):
+    def test_dispatch_default(self):
+        deserializer = wsgi.TextDeserializer()
+        self.assertEqual(deserializer.deserialize({}, 'update'), {})
+
+
+class JSONDeserializerTest(test.TestCase):
+    def test_json(self):
+        data = """{"a": {
+                "a1": "1",
+                "a2": "2",
+                "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
+                "d": {"e": "1"},
+                "f": "1"}}"""
+        as_dict = {
+            'body': {
+                'a': {
+                    'a1': '1',
+                    'a2': '2',
+                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
+                    'd': {'e': '1'},
+                    'f': '1',
+                },
+            },
+        }
+        deserializer = wsgi.JSONDeserializer()
+        self.assertEqual(deserializer.deserialize(data), as_dict)
+
+
+class XMLDeserializerTest(test.TestCase):
+    def test_xml(self):
+        xml = """
+            <a a1="1" a2="2">
+              <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
+              <d><e>1</e></d>
+              <f>1</f>
+            </a>
+            """.strip()
+        as_dict = {
+            'body': {
+                'a': {
+                    'a1': '1',
+                    'a2': '2',
+                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
+                    'd': {'e': '1'},
+                    'f': '1',
+                },
+            },
+        }
+        metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
+        deserializer = wsgi.XMLDeserializer(metadata=metadata)
+        self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+    def test_xml_empty(self):
+        xml = """<a></a>"""
+        as_dict = {"body": {"a": {}}}
+        deserializer = wsgi.XMLDeserializer()
+        self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+
+class ResourceTest(test.TestCase):
+    def test_resource_call(self):
+        class Controller(object):
+            def index(self, req):
+                return 'off'
+
+        req = webob.Request.blank('/tests')
+        app = fakes.TestRouter(Controller())
+        response = req.get_response(app)
+        self.assertEqual(response.body, 'off')
+        self.assertEqual(response.status_int, 200)
+
+    def test_resource_not_authorized(self):
+        class Controller(object):
+            def index(self, req):
+                raise exception.NotAuthorized()
+
+        req = webob.Request.blank('/tests')
+        app = fakes.TestRouter(Controller())
+        response = req.get_response(app)
+        self.assertEqual(response.status_int, 403)
+
+    def test_dispatch(self):
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        method, extensions = resource.get_method(None, 'index', None, '')
+        actual = resource.dispatch(method, None, {'pants': 'off'})
+        expected = 'off'
+        self.assertEqual(actual, expected)
+
+    def test_get_method_undefined_controller_action(self):
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        self.assertRaises(AttributeError, resource.get_method,
+                          None, 'create', None, '')
+
+    def test_get_method_action_json(self):
+        class Controller(wsgi.Controller):
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        method, extensions = resource.get_method(None, 'action',
+                                                 'application/json',
+                                                 '{"fooAction": true}')
+        self.assertEqual(controller._action_foo, method)
+
+    def test_get_method_action_xml(self):
+        class Controller(wsgi.Controller):
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        method, extensions = resource.get_method(None, 'action',
+                                                 'application/xml',
+                                                 '<fooAction>true</fooAction>')
+        self.assertEqual(controller._action_foo, method)
+
+    def test_get_method_action_bad_body(self):
+        class Controller(wsgi.Controller):
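+            # NOTE: @wsgi.action registers the decorated method as the
+            # handler for the named action; a request body that names no
+            # action at all is rejected as MalformedRequestBody below.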
@wsgi.action('fooAction') + def _action_foo(self, req, id, body): + return body + + controller = Controller() + resource = wsgi.Resource(controller) + self.assertRaises(exception.MalformedRequestBody, resource.get_method, + None, 'action', 'application/json', '{}') + + def test_get_method_unknown_controller_action(self): + class Controller(wsgi.Controller): + @wsgi.action('fooAction') + def _action_foo(self, req, id, body): + return body + + controller = Controller() + resource = wsgi.Resource(controller) + self.assertRaises(KeyError, resource.get_method, + None, 'action', 'application/json', + '{"barAction": true}') + + def test_get_method_action_method(self): + class Controller(): + def action(self, req, pants=None): + return pants + + controller = Controller() + resource = wsgi.Resource(controller) + method, extensions = resource.get_method(None, 'action', + 'application/xml', + 'true\n" + 'foobar') + root = xmlutil.make_flat_dict('wrapper') + tmpl = xmlutil.MasterTemplate(root, 1) + result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar'))) + self.assertEqual(result, expected_xml) diff --git a/cinder/tests/api/openstack/volume/__init__.py b/cinder/tests/api/openstack/volume/__init__.py new file mode 100644 index 00000000000..3be5ce944ce --- /dev/null +++ b/cinder/tests/api/openstack/volume/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/api/openstack/volume/contrib/__init__.py b/cinder/tests/api/openstack/volume/contrib/__init__.py new file mode 100644 index 00000000000..3be5ce944ce --- /dev/null +++ b/cinder/tests/api/openstack/volume/contrib/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/api/openstack/volume/contrib/test_types_extra_specs.py b/cinder/tests/api/openstack/volume/contrib/test_types_extra_specs.py new file mode 100644 index 00000000000..01dd0bf823c --- /dev/null +++ b/cinder/tests/api/openstack/volume/contrib/test_types_extra_specs.py @@ -0,0 +1,202 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. 
+# Copyright (c) 2011 OpenStack LLC. +# Copyright 2011 University of Southern California +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from lxml import etree +import webob + +from cinder.api.openstack.volume.contrib import types_extra_specs +from cinder import test +from cinder.tests.api.openstack import fakes +import cinder.wsgi + + +def return_create_volume_type_extra_specs(context, volume_type_id, + extra_specs): + return stub_volume_type_extra_specs() + + +def return_volume_type_extra_specs(context, volume_type_id): + return stub_volume_type_extra_specs() + + +def return_empty_volume_type_extra_specs(context, volume_type_id): + return {} + + +def delete_volume_type_extra_specs(context, volume_type_id, key): + pass + + +def stub_volume_type_extra_specs(): + specs = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} + return specs + + +def volume_type_get(context, volume_type_id): + pass + + +class VolumeTypesExtraSpecsTest(test.TestCase): + + def setUp(self): + super(VolumeTypesExtraSpecsTest, self).setUp() + self.stubs.Set(cinder.db, 'volume_type_get', volume_type_get) + self.api_path = '/v1/fake/os-volume-types/1/extra_specs' + self.controller = types_extra_specs.VolumeTypeExtraSpecsController() + + def test_index(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', + return_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path) + res_dict = self.controller.index(req, 1) + + self.assertEqual('value1', res_dict['extra_specs']['key1']) + + def test_index_no_data(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', + return_empty_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path) + res_dict = self.controller.index(req, 1) + + self.assertEqual(0, len(res_dict['extra_specs'])) + + def test_show(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', + return_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path + '/key5') + res_dict = self.controller.show(req, 1, 'key5') + + self.assertEqual('value5', res_dict['key5']) + + def test_show_spec_not_found(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_get', + return_empty_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path + '/key6') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + req, 1, 'key6') + + def test_delete(self): + self.stubs.Set(cinder.db, 'volume_type_extra_specs_delete', + delete_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path + '/key5') + self.controller.delete(req, 1, 'key5') + + def test_create(self): + self.stubs.Set(cinder.db, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + body = {"extra_specs": {"key1": "value1"}} + + req = fakes.HTTPRequest.blank(self.api_path) + res_dict = self.controller.create(req, 1, body) + + self.assertEqual('value1', res_dict['extra_specs']['key1']) + + def test_create_empty_body(self): + 
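+        # Creating extra specs with an empty body must be rejected with
+        # 400 Bad Request.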
self.stubs.Set(cinder.db, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, + req, 1, '') + + def test_update_item(self): + self.stubs.Set(cinder.db, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + body = {"key1": "value1"} + + req = fakes.HTTPRequest.blank(self.api_path + '/key1') + res_dict = self.controller.update(req, 1, 'key1', body) + + self.assertEqual('value1', res_dict['key1']) + + def test_update_item_empty_body(self): + self.stubs.Set(cinder.db, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + + req = fakes.HTTPRequest.blank(self.api_path + '/key1') + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + req, 1, 'key1', '') + + def test_update_item_too_many_keys(self): + self.stubs.Set(cinder.db, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + body = {"key1": "value1", "key2": "value2"} + + req = fakes.HTTPRequest.blank(self.api_path + '/key1') + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + req, 1, 'key1', body) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(cinder.db, + 'volume_type_extra_specs_update_or_create', + return_create_volume_type_extra_specs) + body = {"key1": "value1"} + + req = fakes.HTTPRequest.blank(self.api_path + '/bad') + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + req, 1, 'bad', body) + + +class VolumeTypeExtraSpecsSerializerTest(test.TestCase): + def test_index_create_serializer(self): + serializer = types_extra_specs.VolumeTypeExtraSpecsTemplate() + + # Just getting some input data + extra_specs = stub_volume_type_extra_specs() + text = serializer.serialize(dict(extra_specs=extra_specs)) + + print text + tree = etree.fromstring(text) + + self.assertEqual('extra_specs', tree.tag) + self.assertEqual(len(extra_specs), len(tree)) + seen = set(extra_specs.keys()) + for child in tree: + self.assertTrue(child.tag in seen) + self.assertEqual(extra_specs[child.tag], child.text) + seen.remove(child.tag) + self.assertEqual(len(seen), 0) + + def test_update_show_serializer(self): + serializer = types_extra_specs.VolumeTypeExtraSpecTemplate() + + exemplar = dict(key1='value1') + text = serializer.serialize(exemplar) + + print text + tree = etree.fromstring(text) + + self.assertEqual('key1', tree.tag) + self.assertEqual('value1', tree.text) + self.assertEqual(0, len(tree)) diff --git a/cinder/tests/api/openstack/volume/contrib/test_types_manage.py b/cinder/tests/api/openstack/volume/contrib/test_types_manage.py new file mode 100644 index 00000000000..f85bf98fd8a --- /dev/null +++ b/cinder/tests/api/openstack/volume/contrib/test_types_manage.py @@ -0,0 +1,103 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
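+# NOTE: the stubs in this module use the magic identifier "777" to mean
+# "no such volume type": they raise VolumeTypeNotFound /
+# VolumeTypeNotFoundByName for it, so one set of stubs drives both the
+# success path and the 404 path of the manage extension tests.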
+ +import webob + +from cinder.api.openstack.volume.contrib import types_manage +from cinder import exception +from cinder import test +from cinder.volume import volume_types +from cinder.tests.api.openstack import fakes + + +def stub_volume_type(id): + specs = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} + return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs) + + +def return_volume_types_get_volume_type(context, id): + if id == "777": + raise exception.VolumeTypeNotFound(volume_type_id=id) + return stub_volume_type(int(id)) + + +def return_volume_types_destroy(context, name): + if name == "777": + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + pass + + +def return_volume_types_create(context, name, specs): + pass + + +def return_volume_types_get_by_name(context, name): + if name == "777": + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + return stub_volume_type(int(name.split("_")[2])) + + +class VolumeTypesManageApiTest(test.TestCase): + def setUp(self): + super(VolumeTypesManageApiTest, self).setUp() + self.controller = types_manage.VolumeTypesManageController() + + def test_volume_types_delete(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + self.stubs.Set(volume_types, 'destroy', + return_volume_types_destroy) + + req = fakes.HTTPRequest.blank('/v1/fake/types/1') + self.controller._delete(req, 1) + + def test_volume_types_delete_not_found(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + self.stubs.Set(volume_types, 'destroy', + return_volume_types_destroy) + + req = fakes.HTTPRequest.blank('/v1/fake/types/777') + self.assertRaises(webob.exc.HTTPNotFound, self.controller._delete, + req, '777') + + def test_create(self): + self.stubs.Set(volume_types, 'create', + return_volume_types_create) + self.stubs.Set(volume_types, 'get_volume_type_by_name', + return_volume_types_get_by_name) + + body = {"volume_type": {"name": "vol_type_1", + "extra_specs": {"key1": "value1"}}} + req = fakes.HTTPRequest.blank('/v1/fake/types') + res_dict = self.controller._create(req, body) + + self.assertEqual(1, len(res_dict)) + self.assertEqual('vol_type_1', res_dict['volume_type']['name']) + + def test_create_empty_body(self): + self.stubs.Set(volume_types, 'create', + return_volume_types_create) + self.stubs.Set(volume_types, 'get_volume_type_by_name', + return_volume_types_get_by_name) + + req = fakes.HTTPRequest.blank('/v1/fake/types') + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller._create, req, '') diff --git a/cinder/tests/api/openstack/volume/extensions/__init__.py b/cinder/tests/api/openstack/volume/extensions/__init__.py new file mode 100644 index 00000000000..848908a953a --- /dev/null +++ b/cinder/tests/api/openstack/volume/extensions/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/cinder/tests/api/openstack/volume/extensions/foxinsocks.py b/cinder/tests/api/openstack/volume/extensions/foxinsocks.py new file mode 100644 index 00000000000..5fcd6a2ba05 --- /dev/null +++ b/cinder/tests/api/openstack/volume/extensions/foxinsocks.py @@ -0,0 +1,94 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import webob.exc + +from cinder.api.openstack import extensions +from cinder.api.openstack import wsgi + + +class FoxInSocksController(object): + + def index(self, req): + return "Try to say this Mr. Knox, sir..." + + +class FoxInSocksServerControllerExtension(wsgi.Controller): + @wsgi.action('add_tweedle') + def _add_tweedle(self, req, id, body): + + return "Tweedle Beetle Added." + + @wsgi.action('delete_tweedle') + def _delete_tweedle(self, req, id, body): + + return "Tweedle Beetle Deleted." + + @wsgi.action('fail') + def _fail(self, req, id, body): + + raise webob.exc.HTTPBadRequest(explanation='Tweedle fail') + + +class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): + @wsgi.extends + def show(self, req, resp_obj, id): + #NOTE: This only handles JSON responses. + # You can use content type header to test for XML. + resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') + + +class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller): + @wsgi.extends + def show(self, req, resp_obj, id): + #NOTE: This only handles JSON responses. + # You can use content type header to test for XML. + resp_obj.obj['big_bands'] = 'Pig Bands!' + + +class Foxinsocks(extensions.ExtensionDescriptor): + """The Fox In Socks Extension""" + + name = "Fox In Socks" + alias = "FOXNSOX" + namespace = "http://www.fox.in.socks/api/ext/pie/v1.0" + updated = "2011-01-22T13:25:27-06:00" + + def __init__(self, ext_mgr): + ext_mgr.register(self) + + def get_resources(self): + resources = [] + resource = extensions.ResourceExtension('foxnsocks', + FoxInSocksController()) + resources.append(resource) + return resources + + def get_controller_extensions(self): + extension_list = [] + + extension_set = [ + (FoxInSocksServerControllerExtension, 'servers'), + (FoxInSocksFlavorGooseControllerExtension, 'flavors'), + (FoxInSocksFlavorBandsControllerExtension, 'flavors'), + ] + for klass, collection in extension_set: + controller = klass() + ext = extensions.ControllerExtension(self, collection, controller) + extension_list.append(ext) + + return extension_list diff --git a/cinder/tests/api/openstack/volume/test_extensions.py b/cinder/tests/api/openstack/volume/test_extensions.py new file mode 100644 index 00000000000..62b4beba465 --- /dev/null +++ b/cinder/tests/api/openstack/volume/test_extensions.py @@ -0,0 +1,156 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +import webob +from lxml import etree +import iso8601 + +from cinder.api.openstack import volume +from cinder.api.openstack import xmlutil +from cinder import flags +from cinder import test + +FLAGS = flags.FLAGS +NS = "{http://docs.openstack.org/common/api/v1.0}" + + +class ExtensionTestCase(test.TestCase): + def setUp(self): + super(ExtensionTestCase, self).setUp() + ext_list = FLAGS.osapi_volume_extension[:] + fox = ('cinder.tests.api.openstack.volume.extensions.' + 'foxinsocks.Foxinsocks') + if fox not in ext_list: + ext_list.append(fox) + self.flags(osapi_volume_extension=ext_list) + + +class ExtensionControllerTest(ExtensionTestCase): + + def setUp(self): + super(ExtensionControllerTest, self).setUp() + self.ext_list = [ + "TypesManage", + "TypesExtraSpecs", + ] + self.ext_list.sort() + + def test_list_extensions_json(self): + app = volume.APIRouter() + request = webob.Request.blank("/fake/extensions") + response = request.get_response(app) + self.assertEqual(200, response.status_int) + + # Make sure we have all the extensions, extra extensions being OK. + data = json.loads(response.body) + names = [str(x['name']) for x in data['extensions'] + if str(x['name']) in self.ext_list] + names.sort() + self.assertEqual(names, self.ext_list) + + # Ensure all the timestamps are valid according to iso8601 + for ext in data['extensions']: + iso8601.parse_date(ext['updated']) + + # Make sure that at least Fox in Sox is correct. 
+ (fox_ext, ) = [ + x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] + self.assertEqual(fox_ext, { + 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', + 'name': 'Fox In Socks', + 'updated': '2011-01-22T13:25:27-06:00', + 'description': 'The Fox In Socks Extension', + 'alias': 'FOXNSOX', + 'links': [] + }, + ) + + for ext in data['extensions']: + url = '/fake/extensions/%s' % ext['alias'] + request = webob.Request.blank(url) + response = request.get_response(app) + output = json.loads(response.body) + self.assertEqual(output['extension']['alias'], ext['alias']) + + def test_get_extension_json(self): + app = volume.APIRouter() + request = webob.Request.blank("/fake/extensions/FOXNSOX") + response = request.get_response(app) + self.assertEqual(200, response.status_int) + + data = json.loads(response.body) + self.assertEqual(data['extension'], { + "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0", + "name": "Fox In Socks", + "updated": "2011-01-22T13:25:27-06:00", + "description": "The Fox In Socks Extension", + "alias": "FOXNSOX", + "links": []}) + + def test_get_non_existing_extension_json(self): + app = volume.APIRouter() + request = webob.Request.blank("/fake/extensions/4") + response = request.get_response(app) + self.assertEqual(404, response.status_int) + + def test_list_extensions_xml(self): + app = volume.APIRouter() + request = webob.Request.blank("/fake/extensions") + request.accept = "application/xml" + response = request.get_response(app) + self.assertEqual(200, response.status_int) + + root = etree.XML(response.body) + self.assertEqual(root.tag.split('extensions')[0], NS) + + # Make sure we have all the extensions, extra extensions being OK. + exts = root.findall('{0}extension'.format(NS)) + self.assert_(len(exts) >= len(self.ext_list)) + + # Make sure that at least Fox in Sox is correct. + (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX'] + self.assertEqual(fox_ext.get('name'), 'Fox In Socks') + self.assertEqual(fox_ext.get('namespace'), + 'http://www.fox.in.socks/api/ext/pie/v1.0') + self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00') + self.assertEqual(fox_ext.findtext('{0}description'.format(NS)), + 'The Fox In Socks Extension') + + xmlutil.validate_schema(root, 'extensions') + + def test_get_extension_xml(self): + app = volume.APIRouter() + request = webob.Request.blank("/fake/extensions/FOXNSOX") + request.accept = "application/xml" + response = request.get_response(app) + self.assertEqual(200, response.status_int) + xml = response.body + + root = etree.XML(xml) + self.assertEqual(root.tag.split('extension')[0], NS) + self.assertEqual(root.get('alias'), 'FOXNSOX') + self.assertEqual(root.get('name'), 'Fox In Socks') + self.assertEqual(root.get('namespace'), + 'http://www.fox.in.socks/api/ext/pie/v1.0') + self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00') + self.assertEqual(root.findtext('{0}description'.format(NS)), + 'The Fox In Socks Extension') + + xmlutil.validate_schema(root, 'extension') diff --git a/cinder/tests/api/openstack/volume/test_router.py b/cinder/tests/api/openstack/volume/test_router.py new file mode 100644 index 00000000000..f7e7afb6c91 --- /dev/null +++ b/cinder/tests/api/openstack/volume/test_router.py @@ -0,0 +1,105 @@ +# Copyright 2011 Denali Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from cinder.api.openstack import volume +from cinder.api.openstack.volume import snapshots +from cinder.api.openstack.volume import volumes +from cinder.api.openstack.volume import versions +from cinder.api.openstack import wsgi +from cinder import flags +from cinder import log as logging +from cinder import test +from cinder.tests.api.openstack import fakes + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) + + +class FakeController(object): + def index(self, req): + return {} + + def detail(self, req): + return {} + + +def create_resource(): + return wsgi.Resource(FakeController()) + + +class VolumeRouterTestCase(test.TestCase): + def setUp(self): + super(VolumeRouterTestCase, self).setUp() + # NOTE(vish): versions is just returning text so, no need to stub. + self.stubs.Set(snapshots, 'create_resource', create_resource) + self.stubs.Set(volumes, 'create_resource', create_resource) + self.app = volume.APIRouter() + + def test_versions(self): + req = fakes.HTTPRequest.blank('') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(302, response.status_int) + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_versions_dispatch(self): + req = fakes.HTTPRequest.blank('/') + req.method = 'GET' + req.content_type = 'application/json' + resource = versions.Versions() + result = resource.dispatch(resource.index, req, {}) + self.assertTrue(result) + + def test_volumes(self): + req = fakes.HTTPRequest.blank('/fake/volumes') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_volumes_detail(self): + req = fakes.HTTPRequest.blank('/fake/volumes/detail') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_types(self): + req = fakes.HTTPRequest.blank('/fake/types') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_snapshots(self): + req = fakes.HTTPRequest.blank('/fake/snapshots') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) + + def test_snapshots_detail(self): + req = fakes.HTTPRequest.blank('/fake/snapshots/detail') + req.method = 'GET' + req.content_type = 'application/json' + response = req.get_response(self.app) + self.assertEqual(200, response.status_int) diff --git a/cinder/tests/api/openstack/volume/test_snapshots.py b/cinder/tests/api/openstack/volume/test_snapshots.py new file mode 100644 index 00000000000..8c87da51461 --- /dev/null +++ b/cinder/tests/api/openstack/volume/test_snapshots.py @@ -0,0 +1,214 @@ +# Copyright 2011 Denali Systems, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from lxml import etree +import webob + +from cinder.api.openstack.volume import snapshots +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import test +from cinder import volume +from cinder.tests.api.openstack import fakes + +FLAGS = flags.FLAGS + +LOG = logging.getLogger(__name__) + + +def _get_default_snapshot_param(): + return { + 'id': 123, + 'volume_id': 12, + 'status': 'available', + 'volume_size': 100, + 'created_at': None, + 'display_name': 'Default name', + 'display_description': 'Default description', + } + + +def stub_snapshot_create(self, context, volume_id, name, description): + snapshot = _get_default_snapshot_param() + snapshot['volume_id'] = volume_id + snapshot['display_name'] = name + snapshot['display_description'] = description + return snapshot + + +def stub_snapshot_delete(self, context, snapshot): + if snapshot['id'] != 123: + raise exception.NotFound + + +def stub_snapshot_get(self, context, snapshot_id): + if snapshot_id != 123: + raise exception.NotFound + + param = _get_default_snapshot_param() + return param + + +def stub_snapshot_get_all(self, context): + param = _get_default_snapshot_param() + return [param] + + +class SnapshotApiTest(test.TestCase): + def setUp(self): + super(SnapshotApiTest, self).setUp() + self.controller = snapshots.SnapshotsController() + + self.stubs.Set(volume.api.API, "get_snapshot", stub_snapshot_get) + self.stubs.Set(volume.api.API, "get_all_snapshots", + stub_snapshot_get_all) + + def test_snapshot_create(self): + self.stubs.Set(volume.api.API, "create_snapshot", stub_snapshot_create) + self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get) + snapshot = {"volume_id": '12', + "force": False, + "display_name": "Snapshot Test Name", + "display_description": "Snapshot Test Desc"} + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v1/snapshots') + resp_dict = self.controller.create(req, body) + + self.assertTrue('snapshot' in resp_dict) + self.assertEqual(resp_dict['snapshot']['display_name'], + snapshot['display_name']) + self.assertEqual(resp_dict['snapshot']['display_description'], + snapshot['display_description']) + + def test_snapshot_create_force(self): + self.stubs.Set(volume.api.API, "create_snapshot_force", + stub_snapshot_create) + self.stubs.Set(volume.api.API, 'get', fakes.stub_volume_get) + snapshot = {"volume_id": '12', + "force": True, + "display_name": "Snapshot Test Name", + "display_description": "Snapshot Test Desc"} + body = dict(snapshot=snapshot) + req = fakes.HTTPRequest.blank('/v1/snapshots') + resp_dict = self.controller.create(req, body) + + self.assertTrue('snapshot' in resp_dict) + self.assertEqual(resp_dict['snapshot']['display_name'], + snapshot['display_name']) + self.assertEqual(resp_dict['snapshot']['display_description'], + snapshot['display_description']) + + def test_snapshot_delete(self): + self.stubs.Set(volume.api.API, "delete_snapshot", 
stub_snapshot_delete) + + snapshot_id = 123 + req = fakes.HTTPRequest.blank('/v1/snapshots/%d' % snapshot_id) + resp = self.controller.delete(req, snapshot_id) + self.assertEqual(resp.status_int, 202) + + def test_snapshot_delete_invalid_id(self): + self.stubs.Set(volume.api.API, "delete_snapshot", stub_snapshot_delete) + snapshot_id = 234 + req = fakes.HTTPRequest.blank('/v1/snapshots/%d' % snapshot_id) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, + req, + snapshot_id) + + def test_snapshot_show(self): + req = fakes.HTTPRequest.blank('/v1/snapshots/123') + resp_dict = self.controller.show(req, 123) + + self.assertTrue('snapshot' in resp_dict) + self.assertEqual(resp_dict['snapshot']['id'], '123') + + def test_snapshot_show_invalid_id(self): + snapshot_id = 234 + req = fakes.HTTPRequest.blank('/v1/snapshots/%d' % snapshot_id) + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, + req, + snapshot_id) + + def test_snapshot_detail(self): + req = fakes.HTTPRequest.blank('/v1/snapshots/detail') + resp_dict = self.controller.detail(req) + + self.assertTrue('snapshots' in resp_dict) + resp_snapshots = resp_dict['snapshots'] + self.assertEqual(len(resp_snapshots), 1) + + resp_snapshot = resp_snapshots.pop() + self.assertEqual(resp_snapshot['id'], '123') + + +class SnapshotSerializerTest(test.TestCase): + def _verify_snapshot(self, snap, tree): + self.assertEqual(tree.tag, 'snapshot') + + for attr in ('id', 'status', 'size', 'created_at', + 'display_name', 'display_description', 'volume_id'): + self.assertEqual(str(snap[attr]), tree.get(attr)) + + def test_snapshot_show_create_serializer(self): + serializer = snapshots.SnapshotTemplate() + raw_snapshot = dict( + id='snap_id', + status='snap_status', + size=1024, + created_at=datetime.datetime.now(), + display_name='snap_name', + display_description='snap_desc', + volume_id='vol_id', + ) + text = serializer.serialize(dict(snapshot=raw_snapshot)) + + print text + tree = etree.fromstring(text) + + self._verify_snapshot(raw_snapshot, tree) + + def test_snapshot_index_detail_serializer(self): + serializer = snapshots.SnapshotsTemplate() + raw_snapshots = [dict( + id='snap1_id', + status='snap1_status', + size=1024, + created_at=datetime.datetime.now(), + display_name='snap1_name', + display_description='snap1_desc', + volume_id='vol1_id', + ), + dict( + id='snap2_id', + status='snap2_status', + size=1024, + created_at=datetime.datetime.now(), + display_name='snap2_name', + display_description='snap2_desc', + volume_id='vol2_id', + )] + text = serializer.serialize(dict(snapshots=raw_snapshots)) + + print text + tree = etree.fromstring(text) + + self.assertEqual('snapshots', tree.tag) + self.assertEqual(len(raw_snapshots), len(tree)) + for idx, child in enumerate(tree): + self._verify_snapshot(raw_snapshots[idx], child) diff --git a/cinder/tests/api/openstack/volume/test_types.py b/cinder/tests/api/openstack/volume/test_types.py new file mode 100644 index 00000000000..e8d98d7de41 --- /dev/null +++ b/cinder/tests/api/openstack/volume/test_types.py @@ -0,0 +1,146 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from lxml import etree +import webob + +from cinder.api.openstack.volume import types +from cinder import exception +from cinder import test +from cinder.volume import volume_types +from cinder.tests.api.openstack import fakes + + +def stub_volume_type(id): + specs = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} + return dict(id=id, name='vol_type_%s' % str(id), extra_specs=specs) + + +def return_volume_types_get_all_types(context): + return dict(vol_type_1=stub_volume_type(1), + vol_type_2=stub_volume_type(2), + vol_type_3=stub_volume_type(3)) + + +def return_empty_volume_types_get_all_types(context): + return {} + + +def return_volume_types_get_volume_type(context, id): + if id == "777": + raise exception.VolumeTypeNotFound(volume_type_id=id) + return stub_volume_type(int(id)) + + +def return_volume_types_get_by_name(context, name): + if name == "777": + raise exception.VolumeTypeNotFoundByName(volume_type_name=name) + return stub_volume_type(int(name.split("_")[2])) + + +class VolumeTypesApiTest(test.TestCase): + def setUp(self): + super(VolumeTypesApiTest, self).setUp() + self.controller = types.VolumeTypesController() + + def test_volume_types_index(self): + self.stubs.Set(volume_types, 'get_all_types', + return_volume_types_get_all_types) + + req = fakes.HTTPRequest.blank('/v1/fake/types') + res_dict = self.controller.index(req) + + self.assertEqual(3, len(res_dict['volume_types'])) + + expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3'] + actual_names = map(lambda e: e['name'], res_dict['volume_types']) + self.assertEqual(set(actual_names), set(expected_names)) + for entry in res_dict['volume_types']: + self.assertEqual('value1', entry['extra_specs']['key1']) + + def test_volume_types_index_no_data(self): + self.stubs.Set(volume_types, 'get_all_types', + return_empty_volume_types_get_all_types) + + req = fakes.HTTPRequest.blank('/v1/fake/types') + res_dict = self.controller.index(req) + + self.assertEqual(0, len(res_dict['volume_types'])) + + def test_volume_types_show(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + + req = fakes.HTTPRequest.blank('/v1/fake/types/1') + res_dict = self.controller.show(req, 1) + + self.assertEqual(1, len(res_dict)) + self.assertEqual('1', res_dict['volume_type']['id']) + self.assertEqual('vol_type_1', res_dict['volume_type']['name']) + + def test_volume_types_show_not_found(self): + self.stubs.Set(volume_types, 'get_volume_type', + return_volume_types_get_volume_type) + + req = fakes.HTTPRequest.blank('/v1/fake/types/777') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, + req, '777') + + +class VolumeTypesSerializerTest(test.TestCase): + def _verify_volume_type(self, vtype, tree): + self.assertEqual('volume_type', tree.tag) + self.assertEqual(vtype['name'], tree.get('name')) + self.assertEqual(str(vtype['id']), tree.get('id')) + self.assertEqual(1, len(tree)) + extra_specs = tree[0] + self.assertEqual('extra_specs', extra_specs.tag) + seen = set(vtype['extra_specs'].keys()) + for child in extra_specs: + 
self.assertTrue(child.tag in seen) + self.assertEqual(vtype['extra_specs'][child.tag], child.text) + seen.remove(child.tag) + self.assertEqual(len(seen), 0) + + def test_index_serializer(self): + serializer = types.VolumeTypesTemplate() + + # Just getting some input data + vtypes = return_volume_types_get_all_types(None) + text = serializer.serialize({'volume_types': vtypes.values()}) + + tree = etree.fromstring(text) + + self.assertEqual('volume_types', tree.tag) + self.assertEqual(len(vtypes), len(tree)) + for child in tree: + name = child.get('name') + self.assertTrue(name in vtypes) + self._verify_volume_type(vtypes[name], child) + + def test_voltype_serializer(self): + serializer = types.VolumeTypeTemplate() + + vtype = stub_volume_type(1) + text = serializer.serialize(dict(volume_type=vtype)) + + tree = etree.fromstring(text) + + self._verify_volume_type(vtype, tree) diff --git a/cinder/tests/api/openstack/volume/test_volumes.py b/cinder/tests/api/openstack/volume/test_volumes.py new file mode 100644 index 00000000000..9563989a91b --- /dev/null +++ b/cinder/tests/api/openstack/volume/test_volumes.py @@ -0,0 +1,290 @@ +# Copyright 2013 Josh Durgin +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from lxml import etree +import webob + +from cinder.api.openstack.volume import volumes +from cinder import flags +from cinder import test +from cinder.tests.api.openstack import fakes +from cinder.volume import api as volume_api + + +FLAGS = flags.FLAGS +NS = '{http://docs.openstack.org/volume/api/v1}' + + +class VolumeApiTest(test.TestCase): + def setUp(self): + super(VolumeApiTest, self).setUp() + self.controller = volumes.VolumeController() + + self.stubs.Set(volume_api.API, 'get_all', fakes.stub_volume_get_all) + self.stubs.Set(volume_api.API, 'get', fakes.stub_volume_get) + self.stubs.Set(volume_api.API, 'delete', fakes.stub_volume_delete) + + def test_volume_create(self): + self.stubs.Set(volume_api.API, "create", fakes.stub_volume_create) + + vol = {"size": 100, + "display_name": "Volume Test Name", + "display_description": "Volume Test Desc", + "availability_zone": "zone1:host1"} + body = {"volume": vol} + req = fakes.HTTPRequest.blank('/v1/volumes') + res_dict = self.controller.create(req, body) + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'Volume Test Desc', + 'availability_zone': 'zone1:host1', + 'display_name': 'Volume Test Name', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1'}], + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 100}} + self.assertEqual(res_dict, expected) + + def test_volume_create_no_body(self): + body = {} + req = fakes.HTTPRequest.blank('/v1/volumes') + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.controller.create, + req, + body) + + def test_volume_list(self): + req = fakes.HTTPRequest.blank('/v1/volumes') + res_dict = 
self.controller.index(req) + expected = {'volumes': [{'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1'}], + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}]} + self.assertEqual(res_dict, expected) + + def test_volume_list_detail(self): + req = fakes.HTTPRequest.blank('/v1/volumes/detail') + res_dict = self.controller.index(req) + expected = {'volumes': [{'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1'}], + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}]} + self.assertEqual(res_dict, expected) + + def test_volume_show(self): + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.show(req, 1) + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [{'device': '/', + 'server_id': 'fakeuuid', + 'id': '1', + 'volume_id': '1'}], + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}} + self.assertEqual(res_dict, expected) + + def test_volume_show_no_attachments(self): + def stub_volume_get(self, context, volume_id): + return fakes.stub_volume(volume_id, attach_status='detached') + + self.stubs.Set(volume_api.API, 'get', stub_volume_get) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + res_dict = self.controller.show(req, 1) + expected = {'volume': {'status': 'fakestatus', + 'display_description': 'displaydesc', + 'availability_zone': 'fakeaz', + 'display_name': 'displayname', + 'attachments': [], + 'volume_type': 'vol_type_name', + 'snapshot_id': None, + 'metadata': {}, + 'id': '1', + 'created_at': datetime.datetime(1, 1, 1, + 1, 1, 1), + 'size': 1}} + self.assertEqual(res_dict, expected) + + def test_volume_show_no_volume(self): + self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.show, + req, + 1) + + def test_volume_delete(self): + req = fakes.HTTPRequest.blank('/v1/volumes/1') + resp = self.controller.delete(req, 1) + self.assertEqual(resp.status_int, 202) + + def test_volume_delete_no_volume(self): + self.stubs.Set(volume_api.API, "get", fakes.stub_volume_get_notfound) + + req = fakes.HTTPRequest.blank('/v1/volumes/1') + self.assertRaises(webob.exc.HTTPNotFound, + self.controller.delete, + req, + 1) + + +class VolumeSerializerTest(test.TestCase): + def _verify_volume_attachment(self, attach, tree): + for attr in ('id', 'volume_id', 'server_id', 'device'): + self.assertEqual(str(attach[attr]), tree.get(attr)) + + def _verify_volume(self, vol, tree): + self.assertEqual(tree.tag, NS + 'volume') + + for attr in ('id', 'status', 'size', 'availability_zone', 'created_at', + 'display_name', 'display_description', 'volume_type', + 'snapshot_id'): + self.assertEqual(str(vol[attr]), tree.get(attr)) + + for child in tree: + print child.tag + self.assertTrue(child.tag in (NS + 
'attachments', NS + 'metadata')) + if child.tag == 'attachments': + self.assertEqual(1, len(child)) + self.assertEqual('attachment', child[0].tag) + self._verify_volume_attachment(vol['attachments'][0], child[0]) + elif child.tag == 'metadata': + not_seen = set(vol['metadata'].keys()) + for gr_child in child: + self.assertTrue(gr_child.tag in not_seen) + self.assertEqual(str(vol['metadata'][gr_child.tag]), + gr_child.text) + not_seen.remove(gr_child.tag) + self.assertEqual(0, len(not_seen)) + + def test_volume_show_create_serializer(self): + serializer = volumes.VolumeTemplate() + raw_volume = dict( + id='vol_id', + status='vol_status', + size=1024, + availability_zone='vol_availability', + created_at=datetime.datetime.now(), + attachments=[dict( + id='vol_id', + volume_id='vol_id', + server_id='instance_uuid', + device='/foo')], + display_name='vol_name', + display_description='vol_desc', + volume_type='vol_type', + snapshot_id='snap_id', + metadata=dict( + foo='bar', + baz='quux', + ), + ) + text = serializer.serialize(dict(volume=raw_volume)) + + print text + tree = etree.fromstring(text) + + self._verify_volume(raw_volume, tree) + + def test_volume_index_detail_serializer(self): + serializer = volumes.VolumesTemplate() + raw_volumes = [dict( + id='vol1_id', + status='vol1_status', + size=1024, + availability_zone='vol1_availability', + created_at=datetime.datetime.now(), + attachments=[dict( + id='vol1_id', + volume_id='vol1_id', + server_id='instance_uuid', + device='/foo1')], + display_name='vol1_name', + display_description='vol1_desc', + volume_type='vol1_type', + snapshot_id='snap1_id', + metadata=dict( + foo='vol1_foo', + bar='vol1_bar', + ), + ), + dict( + id='vol2_id', + status='vol2_status', + size=1024, + availability_zone='vol2_availability', + created_at=datetime.datetime.now(), + attachments=[dict( + id='vol2_id', + volume_id='vol2_id', + server_id='instance_uuid', + device='/foo2')], + display_name='vol2_name', + display_description='vol2_desc', + volume_type='vol2_type', + snapshot_id='snap2_id', + metadata=dict( + foo='vol2_foo', + bar='vol2_bar', + ), + )] + text = serializer.serialize(dict(volumes=raw_volumes)) + + print text + tree = etree.fromstring(text) + + self.assertEqual(NS + 'volumes', tree.tag) + self.assertEqual(len(raw_volumes), len(tree)) + for idx, child in enumerate(tree): + self._verify_volume(raw_volumes[idx], child) diff --git a/cinder/tests/api/test_auth.py b/cinder/tests/api/test_auth.py new file mode 100644 index 00000000000..cfb8b7775c4 --- /dev/null +++ b/cinder/tests/api/test_auth.py @@ -0,0 +1,58 @@ +# Copyright (c) 2012 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
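The middleware tests that follow check that identity headers injected by an auth proxy become a request context, that X_USER_ID wins over the deprecated X_USER, and that a request with no identity is rejected with 401. For orientation, a simplified sketch of a middleware with that contract; the class name and the dict-valued context are illustrative assumptions, not the patched implementation (which lives in cinder/api/auth.py):

import webob.dec
import webob.exc


class KeystoneContextSketch(object):
    """Illustrative only: headers in, request context out."""

    def __init__(self, app):
        self.app = app

    @webob.dec.wsgify()
    def __call__(self, req):
        # X_USER_ID takes precedence, matching test_user_id_trumps_user.
        user_id = req.headers.get('X_USER_ID') or req.headers.get('X_USER')
        if user_id is None:
            return webob.exc.HTTPUnauthorized()  # '401 Unauthorized'
        req.environ['cinder.context'] = {'user_id': user_id}
        return self.app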
+ +import webob + +import cinder.api.auth +from cinder import test + + +class TestCinderKeystoneContextMiddleware(test.TestCase): + + def setUp(self): + super(TestCinderKeystoneContextMiddleware, self).setUp() + + @webob.dec.wsgify() + def fake_app(req): + self.context = req.environ['cinder.context'] + return webob.Response() + + self.context = None + self.middleware = cinder.api.auth.CinderKeystoneContext(fake_app) + self.request = webob.Request.blank('/') + self.request.headers['X_TENANT_ID'] = 'testtenantid' + self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken' + + def test_no_user_or_user_id(self): + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '401 Unauthorized') + + def test_user_only(self): + self.request.headers['X_USER_ID'] = 'testuserid' + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '200 OK') + self.assertEqual(self.context.user_id, 'testuserid') + + def test_user_id_only(self): + self.request.headers['X_USER'] = 'testuser' + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '200 OK') + self.assertEqual(self.context.user_id, 'testuser') + + def test_user_id_trumps_user(self): + self.request.headers['X_USER_ID'] = 'testuserid' + self.request.headers['X_USER'] = 'testuser' + response = self.request.get_response(self.middleware) + self.assertEqual(response.status, '200 OK') + self.assertEqual(self.context.user_id, 'testuserid') diff --git a/cinder/tests/api/test_sizelimit.py b/cinder/tests/api/test_sizelimit.py new file mode 100644 index 00000000000..280ee9c29d7 --- /dev/null +++ b/cinder/tests/api/test_sizelimit.py @@ -0,0 +1,51 @@ +# Copyright (c) 2012 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
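The size-limit tests below pin down the limiter's contract: an oversized declared Content-Length and an oversized actual body should both produce a 400 before the wrapped app runs. A sketch of a middleware with that behavior; the class name and the 114688-byte default are assumptions made here for illustration, not the patched code:

import webob.dec
import webob.exc


class SizeLimiterSketch(object):
    """Illustrative only: reject over-large request bodies early."""

    def __init__(self, app, max_size=114688):
        self.app = app
        self.max_size = max_size

    @webob.dec.wsgify()
    def __call__(self, req):
        # Check the declared length first, then the body actually sent.
        if req.content_length is not None and \
                req.content_length > self.max_size:
            return webob.exc.HTTPBadRequest()
        if len(req.body) > self.max_size:
            return webob.exc.HTTPBadRequest()
        return self.app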
+ +import webob + +import cinder.api.sizelimit +from cinder import flags +from cinder import test + +FLAGS = flags.FLAGS +MAX_REQUEST_BODY_SIZE = FLAGS.osapi_max_request_body_size + + +class TestRequestBodySizeLimiter(test.TestCase): + + def setUp(self): + super(TestRequestBodySizeLimiter, self).setUp() + + @webob.dec.wsgify() + def fake_app(req): + return webob.Response() + + self.middleware = cinder.api.sizelimit.RequestBodySizeLimiter(fake_app) + self.request = webob.Request.blank('/', method='POST') + + def test_content_length_acceptable(self): + self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + self.request.body = "0" * MAX_REQUEST_BODY_SIZE + response = self.request.get_response(self.middleware) + self.assertEqual(response.status_int, 200) + + def test_content_length_too_large(self): + self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + 1 + response = self.request.get_response(self.middleware) + self.assertEqual(response.status_int, 400) + + def test_request_too_large(self): + self.request.body = "0" * (MAX_REQUEST_BODY_SIZE + 1) + response = self.request.get_response(self.middleware) + self.assertEqual(response.status_int, 400) diff --git a/cinder/tests/api/test_wsgi.py b/cinder/tests/api/test_wsgi.py new file mode 100644 index 00000000000..c68f8158eb5 --- /dev/null +++ b/cinder/tests/api/test_wsgi.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test WSGI basics and provide some helper functions for other WSGI tests.
+""" + +from cinder import test + +import routes +import webob + +from cinder import wsgi + + +class Test(test.TestCase): + + def test_debug(self): + + class Application(wsgi.Application): + """Dummy application to test debug.""" + + def __call__(self, environ, start_response): + start_response("200", [("X-Test", "checking")]) + return ['Test result'] + + application = wsgi.Debug(Application()) + result = webob.Request.blank('/').get_response(application) + self.assertEqual(result.body, "Test result") + + def test_router(self): + + class Application(wsgi.Application): + """Test application to call from router.""" + + def __call__(self, environ, start_response): + start_response("200", []) + return ['Router result'] + + class Router(wsgi.Router): + """Test router.""" + + def __init__(self): + mapper = routes.Mapper() + mapper.connect("/test", controller=Application()) + super(Router, self).__init__(mapper) + + result = webob.Request.blank('/test').get_response(Router()) + self.assertEqual(result.body, "Router result") + result = webob.Request.blank('/bad').get_response(Router()) + self.assertNotEqual(result.body, "Router result") diff --git a/cinder/tests/db/__init__.py b/cinder/tests/db/__init__.py new file mode 100644 index 00000000000..2d43aac42ff --- /dev/null +++ b/cinder/tests/db/__init__.py @@ -0,0 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`db` -- Stubs for DB API +============================= +""" diff --git a/cinder/tests/db/fakes.py b/cinder/tests/db/fakes.py new file mode 100644 index 00000000000..5cd2284d25b --- /dev/null +++ b/cinder/tests/db/fakes.py @@ -0,0 +1,47 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 OpenStack, LLC +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Stubouts, mocks and fixtures for the test suite""" + +from cinder import db +from cinder import exception + + +class FakeModel(object): + """Stubs out for model.""" + def __init__(self, values): + self.values = values + + def __getattr__(self, name): + return self.values[name] + + def __getitem__(self, key): + if key in self.values: + return self.values[key] + else: + raise NotImplementedError() + + def __repr__(self): + return '' % self.values + + +def stub_out(stubs, funcs): + """Set the stubs in mapping in the db api.""" + for func in funcs: + func_name = '_'.join(func.__name__.split('_')[1:]) + stubs.Set(db, func_name, func) diff --git a/cinder/tests/declare_flags.py b/cinder/tests/declare_flags.py new file mode 100644 index 00000000000..ee4733fc108 --- /dev/null +++ b/cinder/tests/declare_flags.py @@ -0,0 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import flags +from cinder.openstack.common import cfg + +FLAGS = flags.FLAGS +FLAGS.register_opt(cfg.IntOpt('answer', default=42, help='test flag')) diff --git a/cinder/tests/fake_flags.py b/cinder/tests/fake_flags.py new file mode 100644 index 00000000000..89d0f84f788 --- /dev/null +++ b/cinder/tests/fake_flags.py @@ -0,0 +1,34 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from cinder import flags + +FLAGS = flags.FLAGS + +flags.DECLARE('volume_driver', 'cinder.volume.manager') +FLAGS.set_default('volume_driver', 'cinder.volume.driver.FakeISCSIDriver') +FLAGS.set_default('connection_type', 'fake') +FLAGS.set_default('fake_rabbit', True) +FLAGS.set_default('rpc_backend', 'cinder.rpc.impl_fake') +flags.DECLARE('iscsi_num_targets', 'cinder.volume.driver') +FLAGS.set_default('iscsi_num_targets', 8) +FLAGS.set_default('verbose', True) +FLAGS.set_default('sql_connection', "sqlite://") +FLAGS.set_default('sqlite_synchronous', False) +flags.DECLARE('policy_file', 'cinder.policy') +FLAGS.set_default('policy_file', 'cinder/tests/policy.json') diff --git a/cinder/tests/fake_utils.py b/cinder/tests/fake_utils.py new file mode 100644 index 00000000000..02f0e14cadc --- /dev/null +++ b/cinder/tests/fake_utils.py @@ -0,0 +1,112 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""This module stubs out functions in cinder.utils.""" + +import re + +from eventlet import greenthread + +from cinder import exception +from cinder import log as logging +from cinder import utils + +LOG = logging.getLogger(__name__) + +_fake_execute_repliers = [] +_fake_execute_log = [] + + +def fake_execute_get_log(): + return _fake_execute_log + + +def fake_execute_clear_log(): + global _fake_execute_log + _fake_execute_log = [] + + +def fake_execute_set_repliers(repliers): + """Allows the client to configure replies to commands.""" + global _fake_execute_repliers + _fake_execute_repliers = repliers + + +def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs): + """A reply handler for commands that haven't been added to the reply list. + + Returns empty strings for stdout and stderr. + + """ + return '', '' + + +def fake_execute(*cmd_parts, **kwargs): + """This function stubs out execute. + + It optionally executes a preconfigured function to return expected data.
+ + """ + global _fake_execute_repliers + + process_input = kwargs.get('process_input', None) + check_exit_code = kwargs.get('check_exit_code', 0) + delay_on_retry = kwargs.get('delay_on_retry', True) + attempts = kwargs.get('attempts', 1) + run_as_root = kwargs.get('run_as_root', False) + cmd_str = ' '.join(str(part) for part in cmd_parts) + + LOG.debug(_("Faking execution of cmd (subprocess): %s"), cmd_str) + _fake_execute_log.append(cmd_str) + + reply_handler = fake_execute_default_reply_handler + + for fake_replier in _fake_execute_repliers: + if re.match(fake_replier[0], cmd_str): + reply_handler = fake_replier[1] + LOG.debug(_('Faked command matched %s') % fake_replier[0]) + break + + if isinstance(reply_handler, basestring): + # If the reply handler is a string, return it as stdout + reply = reply_handler, '' + else: + try: + # Alternative is a function, so call it + reply = reply_handler(cmd_parts, + process_input=process_input, + delay_on_retry=delay_on_retry, + attempts=attempts, + run_as_root=run_as_root, + check_exit_code=check_exit_code) + except exception.ProcessExecutionError as e: + LOG.debug(_('Faked command raised an exception %s'), e) + raise + + stdout = reply[0] + stderr = reply[1] + LOG.debug(_("Reply to faked command is stdout='%(stdout)s' " + "stderr='%(stderr)s'") % locals()) + + # Replicate the sleep call in the real function + greenthread.sleep(0) + return reply + + +def stub_out_utils_execute(stubs): + fake_execute_set_repliers([]) + fake_execute_clear_log() + stubs.Set(utils, 'execute', fake_execute) diff --git a/cinder/tests/integrated/__init__.py b/cinder/tests/integrated/__init__.py new file mode 100644 index 00000000000..7c17b5ad74a --- /dev/null +++ b/cinder/tests/integrated/__init__.py @@ -0,0 +1,22 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`integrated` -- Tests whole systems, using mock services where needed +================================= +""" +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/integrated/api/__init__.py b/cinder/tests/integrated/api/__init__.py new file mode 100644 index 00000000000..5798ab3d1e6 --- /dev/null +++ b/cinder/tests/integrated/api/__init__.py @@ -0,0 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +:mod:`api` -- OpenStack API client, for testing rather than production +================================= +""" diff --git a/cinder/tests/integrated/api/client.py b/cinder/tests/integrated/api/client.py new file mode 100644 index 00000000000..51e247d4ff5 --- /dev/null +++ b/cinder/tests/integrated/api/client.py @@ -0,0 +1,217 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import httplib +import urllib +import urlparse + +from cinder import log as logging + + +LOG = logging.getLogger(__name__) + + +class OpenStackApiException(Exception): + def __init__(self, message=None, response=None): + self.response = response + if not message: + message = 'Unspecified error' + + if response: + _status = response.status + _body = response.read() + + message = _('%(message)s\nStatus Code: %(_status)s\n' + 'Body: %(_body)s') % locals() + + super(OpenStackApiException, self).__init__(message) + + +class OpenStackApiAuthenticationException(OpenStackApiException): + def __init__(self, response=None, message=None): + if not message: + message = _("Authentication error") + super(OpenStackApiAuthenticationException, self).__init__(message, + response) + + +class OpenStackApiAuthorizationException(OpenStackApiException): + def __init__(self, response=None, message=None): + if not message: + message = _("Authorization error") + super(OpenStackApiAuthorizationException, self).__init__(message, + response) + + +class OpenStackApiNotFoundException(OpenStackApiException): + def __init__(self, response=None, message=None): + if not message: + message = _("Item not found") + super(OpenStackApiNotFoundException, self).__init__(message, response) + + +class TestOpenStackClient(object): + """Simple OpenStack API Client. + + This is a really basic OpenStack API client that is under our control, + so we can make changes / insert hooks for testing + + """ + + def __init__(self, auth_user, auth_key, auth_uri): + super(TestOpenStackClient, self).__init__() + self.auth_result = None + self.auth_user = auth_user + self.auth_key = auth_key + self.auth_uri = auth_uri + # default project_id + self.project_id = 'openstack' + + def request(self, url, method='GET', body=None, headers=None): + _headers = {'Content-Type': 'application/json'} + _headers.update(headers or {}) + + parsed_url = urlparse.urlparse(url) + port = parsed_url.port + hostname = parsed_url.hostname + scheme = parsed_url.scheme + + if scheme == 'http': + conn = httplib.HTTPConnection(hostname, + port=port) + elif scheme == 'https': + conn = httplib.HTTPSConnection(hostname, + port=port) + else: + raise OpenStackApiException("Unknown scheme: %s" % url) + + relative_url = parsed_url.path + if parsed_url.query: + relative_url = relative_url + "?" 
+ parsed_url.query + LOG.info(_("Doing %(method)s on %(relative_url)s") % locals()) + if body: + LOG.info(_("Body: %s") % body) + + conn.request(method, relative_url, body, _headers) + response = conn.getresponse() + return response + + def _authenticate(self): + if self.auth_result: + return self.auth_result + + auth_uri = self.auth_uri + headers = {'X-Auth-User': self.auth_user, + 'X-Auth-Key': self.auth_key, + 'X-Auth-Project-Id': self.project_id} + response = self.request(auth_uri, + headers=headers) + + http_status = response.status + LOG.debug(_("%(auth_uri)s => code %(http_status)s") % locals()) + + if http_status == 401: + raise OpenStackApiAuthenticationException(response=response) + + auth_headers = {} + for k, v in response.getheaders(): + auth_headers[k] = v + + self.auth_result = auth_headers + return self.auth_result + + def api_request(self, relative_uri, check_response_status=None, **kwargs): + auth_result = self._authenticate() + + # NOTE(justinsb): httplib 'helpfully' converts headers to lower case + base_uri = auth_result['x-server-management-url'] + + full_uri = '%s/%s' % (base_uri, relative_uri) + + headers = kwargs.setdefault('headers', {}) + headers['X-Auth-Token'] = auth_result['x-auth-token'] + + response = self.request(full_uri, **kwargs) + + http_status = response.status + LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals()) + + if check_response_status: + if not http_status in check_response_status: + if http_status == 404: + raise OpenStackApiNotFoundException(response=response) + elif http_status == 401: + raise OpenStackApiAuthorizationException(response=response) + else: + raise OpenStackApiException( + message=_("Unexpected status code"), + response=response) + + return response + + def _decode_json(self, response): + body = response.read() + LOG.debug(_("Decoding JSON: %s") % (body)) + if body: + return json.loads(body) + else: + return "" + + def api_get(self, relative_uri, **kwargs): + kwargs.setdefault('check_response_status', [200]) + response = self.api_request(relative_uri, **kwargs) + return self._decode_json(response) + + def api_post(self, relative_uri, body, **kwargs): + kwargs['method'] = 'POST' + if body: + headers = kwargs.setdefault('headers', {}) + headers['Content-Type'] = 'application/json' + kwargs['body'] = json.dumps(body) + + kwargs.setdefault('check_response_status', [200, 202]) + response = self.api_request(relative_uri, **kwargs) + return self._decode_json(response) + + def api_put(self, relative_uri, body, **kwargs): + kwargs['method'] = 'PUT' + if body: + headers = kwargs.setdefault('headers', {}) + headers['Content-Type'] = 'application/json' + kwargs['body'] = json.dumps(body) + + kwargs.setdefault('check_response_status', [200, 202, 204]) + response = self.api_request(relative_uri, **kwargs) + return self._decode_json(response) + + def api_delete(self, relative_uri, **kwargs): + kwargs['method'] = 'DELETE' + kwargs.setdefault('check_response_status', [200, 202, 204]) + return self.api_request(relative_uri, **kwargs) + + def get_volume(self, volume_id): + return self.api_get('/volumes/%s' % volume_id)['volume'] + + def get_volumes(self, detail=True): + rel_url = '/volumes/detail' if detail else '/volumes' + return self.api_get(rel_url)['volumes'] + + def post_volume(self, volume): + return self.api_post('/volumes', volume)['volume'] + + def delete_volume(self, volume_id): + return self.api_delete('/volumes/%s' % volume_id) diff --git a/cinder/tests/integrated/integrated_helpers.py 
b/cinder/tests/integrated/integrated_helpers.py new file mode 100644 index 00000000000..4b243efc13f --- /dev/null +++ b/cinder/tests/integrated/integrated_helpers.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Provides common functionality for integrated unit tests +""" + +import random +import string + +from cinder.log import logging +from cinder import service +from cinder import test # For the flags +from cinder.tests.integrated.api import client +from cinder import utils + + +LOG = logging.getLogger(__name__) + + +def generate_random_alphanumeric(length): + """Creates a random alphanumeric string of specified length.""" + return ''.join(random.choice(string.ascii_uppercase + string.digits) + for _x in range(length)) + + +def generate_random_numeric(length): + """Creates a random numeric string of specified length.""" + return ''.join(random.choice(string.digits) + for _x in range(length)) + + +def generate_new_element(items, prefix, numeric=False): + """Creates a random string with prefix, that is not in 'items' list.""" + while True: + if numeric: + candidate = prefix + generate_random_numeric(8) + else: + candidate = prefix + generate_random_alphanumeric(8) + if not candidate in items: + return candidate + LOG.debug("Random collision on %s" % candidate) + + +class _IntegratedTestBase(test.TestCase): + def setUp(self): + super(_IntegratedTestBase, self).setUp() + + f = self._get_flags() + self.flags(**f) + self.flags(verbose=True) + + # set up services + self.volume = self.start_service('volume') + self.scheduler = self.start_service('scheduler') + + self._start_api_service() + + self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url) + + def tearDown(self): + self.osapi.stop() + super(_IntegratedTestBase, self).tearDown() + + def _start_api_service(self): + self.osapi = service.WSGIService("osapi_volume") + self.osapi.start() + # FIXME(ja): this is not the auth url - this is the service url + # FIXME(ja): this needs fixed in nova as well + self.auth_url = 'http://%s:%s/v1' % (self.osapi.host, self.osapi.port) + LOG.warn(self.auth_url) + + def _get_flags(self): + """An opportunity to setup flags, before the services are started.""" + f = {} + + # Ensure tests only listen on localhost + f['osapi_volume_listen'] = '127.0.0.1' + + # Auto-assign ports to allow concurrent tests + f['osapi_volume_listen_port'] = 0 + + return f + + def get_unused_server_name(self): + servers = self.api.get_servers() + server_names = [server['name'] for server in servers] + return generate_new_element(server_names, 'server') + + def get_invalid_image(self): + return str(utils.gen_uuid()) + + def _build_minimal_create_server_request(self): + server = {} + + image = self.api.get_images()[0] + LOG.debug("Image: %s" % image) + + if 'imageRef' in image: + image_href = image['imageRef'] + else: + image_href = image['id'] + image_href = 'http://fake.server/%s' % 
image_href + + # We now have a valid imageId + server['imageRef'] = image_href + + # Set a valid flavorId + flavor = self.api.get_flavors()[0] + LOG.debug("Using flavor: %s" % flavor) + server['flavorRef'] = 'http://fake.server/%s' % flavor['id'] + + # Set a valid server name + server_name = self.get_unused_server_name() + server['name'] = server_name + return server diff --git a/cinder/tests/integrated/test_extensions.py b/cinder/tests/integrated/test_extensions.py new file mode 100644 index 00000000000..bcb921d8344 --- /dev/null +++ b/cinder/tests/integrated/test_extensions.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.api.openstack.volume import extensions +from cinder import flags +from cinder.log import logging +from cinder.tests.integrated import integrated_helpers + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class ExtensionsTest(integrated_helpers._IntegratedTestBase): + def _get_flags(self): + f = super(ExtensionsTest, self)._get_flags() + f['osapi_volume_extension'] = FLAGS.osapi_volume_extension[:] + f['osapi_volume_extension'].append( + 'cinder.tests.api.openstack.volume.extensions.' + 'foxinsocks.Foxinsocks') + return f + + def test_get_foxnsocks(self): + """Simple check that fox-n-socks works.""" + response = self.api.api_request('/foxnsocks') + foxnsocks = response.read() + LOG.debug("foxnsocks: %s" % foxnsocks) + self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks) diff --git a/cinder/tests/integrated/test_login.py b/cinder/tests/integrated/test_login.py new file mode 100644 index 00000000000..cd9fb7a1673 --- /dev/null +++ b/cinder/tests/integrated/test_login.py @@ -0,0 +1,31 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
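+"""Login smoke test: listing volumes proves we are logged in."""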
+ + +from cinder.log import logging +from cinder.tests.integrated import integrated_helpers + + +LOG = logging.getLogger(__name__) + + +class LoginTest(integrated_helpers._IntegratedTestBase): + def test_login(self): + """Simple check - we list volumes - so we know we're logged in.""" + volumes = self.api.get_volumes() + for volume in volumes: + LOG.debug(_("volume: %s") % volume) diff --git a/cinder/tests/integrated/test_volumes.py b/cinder/tests/integrated/test_volumes.py new file mode 100644 index 00000000000..f94d85604da --- /dev/null +++ b/cinder/tests/integrated/test_volumes.py @@ -0,0 +1,181 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import unittest +import time + +from cinder import service +from cinder.log import logging +from cinder.tests.integrated import integrated_helpers +from cinder.tests.integrated.api import client +from cinder.volume import driver + + +LOG = logging.getLogger(__name__) + + +class VolumesTest(integrated_helpers._IntegratedTestBase): + def setUp(self): + super(VolumesTest, self).setUp() + driver.LoggingVolumeDriver.clear_logs() + + def _start_api_service(self): + self.osapi = service.WSGIService("osapi_volume") + self.osapi.start() + self.auth_url = 'http://%s:%s/v1' % (self.osapi.host, self.osapi.port) + LOG.warn(self.auth_url) + + def _get_flags(self): + f = super(VolumesTest, self)._get_flags() + f['use_local_volumes'] = False # Avoids calling local_path + f['volume_driver'] = 'cinder.volume.driver.LoggingVolumeDriver' + return f + + def test_get_volumes_summary(self): + """Simple check that listing volumes works.""" + volumes = self.api.get_volumes(False) + for volume in volumes: + LOG.debug("volume: %s" % volume) + + def test_get_volumes(self): + """Simple check that listing volumes works.""" + volumes = self.api.get_volumes() + for volume in volumes: + LOG.debug("volume: %s" % volume) + + def _poll_while(self, volume_id, continue_states, max_retries=5): + """Poll (briefly) while the state is in continue_states.""" + retries = 0 + while True: + try: + found_volume = self.api.get_volume(volume_id) + except client.OpenStackApiNotFoundException: + found_volume = None + LOG.debug("Got 404, proceeding") + break + + LOG.debug("Found %s" % found_volume) + + self.assertEqual(volume_id, found_volume['id']) + + if not found_volume['status'] in continue_states: + break + + time.sleep(1) + retries = retries + 1 + if retries > max_retries: + break + return found_volume + + def test_create_and_delete_volume(self): + """Creates and deletes a volume.""" + + # Create volume + created_volume = self.api.post_volume({'volume': {'size': 1}}) + LOG.debug("created_volume: %s" % created_volume) + self.assertTrue(created_volume['id']) + created_volume_id = created_volume['id'] + + # Check it's there + found_volume = self.api.get_volume(created_volume_id) + self.assertEqual(created_volume_id, found_volume['id']) + + # It should also be in the all-volume list + 
volumes = self.api.get_volumes() + volume_ids = [volume['id'] for volume in volumes] + self.assertTrue(created_volume_id in volume_ids) + + # Wait (briefly) for creation. Delay is due to the 'message queue' + found_volume = self._poll_while(created_volume_id, ['creating']) + + # It should be available... + self.assertEqual('available', found_volume['status']) + + # Delete the volume + self.api.delete_volume(created_volume_id) + + # Wait (briefly) for deletion. Delay is due to the 'message queue' + found_volume = self._poll_while(created_volume_id, ['deleting']) + + # Should be gone + self.assertFalse(found_volume) + + LOG.debug("Logs: %s" % driver.LoggingVolumeDriver.all_logs()) + + create_actions = driver.LoggingVolumeDriver.logs_like( + 'create_volume', + id=created_volume_id) + LOG.debug("Create_Actions: %s" % create_actions) + + self.assertEquals(1, len(create_actions)) + create_action = create_actions[0] + self.assertEquals(create_action['id'], created_volume_id) + self.assertEquals(create_action['availability_zone'], 'cinder') + self.assertEquals(create_action['size'], 1) + + export_actions = driver.LoggingVolumeDriver.logs_like( + 'create_export', + id=created_volume_id) + self.assertEquals(1, len(export_actions)) + export_action = export_actions[0] + self.assertEquals(export_action['id'], created_volume_id) + self.assertEquals(export_action['availability_zone'], 'cinder') + + delete_actions = driver.LoggingVolumeDriver.logs_like( + 'delete_volume', + id=created_volume_id) + self.assertEquals(1, len(delete_actions)) + delete_action = delete_actions[0] + self.assertEquals(delete_action['id'], created_volume_id) + + def test_create_volume_with_metadata(self): + """Creates a volume with metadata.""" + + # Create volume + metadata = {'key1': 'value1', + 'key2': 'value2'} + created_volume = self.api.post_volume( + {'volume': {'size': 1, + 'metadata': metadata}}) + LOG.debug("created_volume: %s" % created_volume) + self.assertTrue(created_volume['id']) + created_volume_id = created_volume['id'] + + # Check it's there and metadata present + found_volume = self.api.get_volume(created_volume_id) + self.assertEqual(created_volume_id, found_volume['id']) + self.assertEqual(metadata, found_volume['metadata']) + + def test_create_volume_in_availability_zone(self): + """Creates a volume in availability_zone.""" + + # Create volume + availability_zone = 'zone1:host1' + created_volume = self.api.post_volume( + {'volume': {'size': 1, + 'availability_zone': availability_zone}}) + LOG.debug("created_volume: %s" % created_volume) + self.assertTrue(created_volume['id']) + created_volume_id = created_volume['id'] + + # Check it's there and availability zone present + found_volume = self.api.get_volume(created_volume_id) + self.assertEqual(created_volume_id, found_volume['id']) + self.assertEqual(availability_zone, found_volume['availability_zone']) + +if __name__ == "__main__": + unittest.main() diff --git a/cinder/tests/integrated/test_xml.py b/cinder/tests/integrated/test_xml.py new file mode 100644 index 00000000000..1277b80aabe --- /dev/null +++ b/cinder/tests/integrated/test_xml.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from lxml import etree + +from cinder.log import logging +from cinder.tests.integrated import integrated_helpers +from cinder.api.openstack import common +from cinder.api.openstack import xmlutil + + +LOG = logging.getLogger(__name__) + + +class XmlTests(integrated_helpers._IntegratedTestBase): + """Some basic XML sanity checks.""" + + # FIXME(ja): does cinder need limits? + # def test_namespace_limits(self): + # headers = {} + # headers['Accept'] = 'application/xml' + + # response = self.api.api_request('/limits', headers=headers) + # data = response.read() + # LOG.debug("data: %s" % data) + # root = etree.XML(data) + # self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10) + + def test_namespace_volumes(self): + """/volumes should have v1.1 namespace (has changed in 1.1).""" + headers = {} + headers['Accept'] = 'application/xml' + + response = self.api.api_request('/volumes', headers=headers) + data = response.read() + LOG.warn("data: %s" % data) + root = etree.XML(data) + self.assertEqual(root.nsmap.get(None), common.XML_NS_V1) diff --git a/cinder/tests/monkey_patch_example/__init__.py b/cinder/tests/monkey_patch_example/__init__.py new file mode 100644 index 00000000000..25cf9ccfe68 --- /dev/null +++ b/cinder/tests/monkey_patch_example/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Example Module for testing utils.monkey_patch().""" + + +CALLED_FUNCTION = [] + + +def example_decorator(name, function): + """Decorator for notify, used from utils.monkey_patch(). + + :param name: name of the function + :param function: object of the function + :returns: function -- decorated function + """ + def wrapped_func(*args, **kwarg): + CALLED_FUNCTION.append(name) + return function(*args, **kwarg) + return wrapped_func diff --git a/cinder/tests/monkey_patch_example/example_a.py b/cinder/tests/monkey_patch_example/example_a.py new file mode 100644 index 00000000000..21e79bcb0f7 --- /dev/null +++ b/cinder/tests/monkey_patch_example/example_a.py @@ -0,0 +1,29 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Example Module A for testing utils.monkey_patch().""" + + +def example_function_a(): + return 'Example function' + + +class ExampleClassA(): + def example_method(self): + return 'Example method' + + def example_method_add(self, arg1, arg2): + return arg1 + arg2 diff --git a/cinder/tests/monkey_patch_example/example_b.py b/cinder/tests/monkey_patch_example/example_b.py new file mode 100644 index 00000000000..9d8f6d339ef --- /dev/null +++ b/cinder/tests/monkey_patch_example/example_b.py @@ -0,0 +1,30 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Example Module B for testing utils.monkey_patch().""" + + +def example_function_b(): + return 'Example function' + + +class ExampleClassB(): + def example_method(self): + return 'Example method' + + def example_method_add(self, arg1, arg2): + return arg1 + arg2 diff --git a/cinder/tests/notifier/__init__.py b/cinder/tests/notifier/__init__.py new file mode 100644 index 00000000000..643e7c46387 --- /dev/null +++ b/cinder/tests/notifier/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder.tests import * diff --git a/cinder/tests/notifier/test_capacity_notifier.py b/cinder/tests/notifier/test_capacity_notifier.py new file mode 100644 index 00000000000..298de5f60b8 --- /dev/null +++ b/cinder/tests/notifier/test_capacity_notifier.py @@ -0,0 +1,59 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import cinder.db.api +from cinder.notifier import capacity_notifier as cn +from cinder import test +from cinder import utils + + +class CapacityNotifierTestCase(test.TestCase): + """Test case for the Capacity updating notifier.""" + + def _make_msg(self, host, event): + usage_info = dict(memory_mb=123, disk_gb=456) + payload = utils.to_primitive(usage_info, convert_instances=True) + return dict( + publisher_id="compute.%s" % host, + event_type="compute.instance.%s" % event, + payload=payload + ) + + def test_event_type(self): + msg = self._make_msg("myhost", "mymethod") + msg['event_type'] = 'random' + self.assertFalse(cn.notify(msg)) + + def test_bad_event_suffix(self): + msg = self._make_msg("myhost", "mymethod.badsuffix") + self.assertFalse(cn.notify(msg)) + + def test_bad_publisher_id(self): + msg = self._make_msg("myhost", "mymethod.start") + msg['publisher_id'] = 'badpublisher' + self.assertFalse(cn.notify(msg)) + + def test_update_called(self): + def _verify_called(host, context, free_ram_mb_delta, + free_disk_gb_delta, work_delta, vm_delta): + self.assertEquals(free_ram_mb_delta, 123) + self.assertEquals(free_disk_gb_delta, 456) + self.assertEquals(vm_delta, -1) + self.assertEquals(work_delta, -1) + + self.stubs.Set(cinder.db.api, "compute_node_utilization_update", + _verify_called) + msg = self._make_msg("myhost", "delete.end") + self.assertTrue(cn.notify(msg)) diff --git a/cinder/tests/notifier/test_list_notifier.py b/cinder/tests/notifier/test_list_notifier.py new file mode 100644 index 00000000000..83e380764ac --- /dev/null +++ b/cinder/tests/notifier/test_list_notifier.py @@ -0,0 +1,84 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import cinder +from cinder import log as logging +import cinder.notifier.api +import cinder.notifier.log_notifier +import cinder.notifier.no_op_notifier +from cinder.notifier import list_notifier +from cinder import test + + +class NotifierListTestCase(test.TestCase): + """Test case for notifications""" + + def setUp(self): + super(NotifierListTestCase, self).setUp() + list_notifier._reset_drivers() + # Mock log to add one to exception_count when log.exception is called + + def mock_exception(cls, *args): + self.exception_count += 1 + + self.exception_count = 0 + list_notifier_log = logging.getLogger('cinder.notifier.list_notifier') + self.stubs.Set(list_notifier_log, "exception", mock_exception) + # Mock no_op notifier to add one to notify_count when called. + + def mock_notify(cls, *args): + self.notify_count += 1 + + self.notify_count = 0 + self.stubs.Set(cinder.notifier.no_op_notifier, 'notify', mock_notify) + # Mock log_notifier to raise RuntimeError when called. 
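+ # list_notifier should log that failure and keep calling the remaining drivers.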
+ + def mock_notify2(cls, *args): + raise RuntimeError("Bad notifier.") + + self.stubs.Set(cinder.notifier.log_notifier, 'notify', mock_notify2) + + def tearDown(self): + list_notifier._reset_drivers() + super(NotifierListTestCase, self).tearDown() + + def test_send_notifications_successfully(self): + self.flags(notification_driver='cinder.notifier.list_notifier', + list_notifier_drivers=['cinder.notifier.no_op_notifier', + 'cinder.notifier.no_op_notifier']) + cinder.notifier.api.notify('publisher_id', 'event_type', + cinder.notifier.api.WARN, dict(a=3)) + self.assertEqual(self.notify_count, 2) + self.assertEqual(self.exception_count, 0) + + def test_send_notifications_with_errors(self): + + self.flags(notification_driver='cinder.notifier.list_notifier', + list_notifier_drivers=['cinder.notifier.no_op_notifier', + 'cinder.notifier.log_notifier']) + cinder.notifier.api.notify('publisher_id', + 'event_type', cinder.notifier.api.WARN, dict(a=3)) + self.assertEqual(self.notify_count, 1) + self.assertEqual(self.exception_count, 1) + + def test_when_driver_fails_to_import(self): + self.flags(notification_driver='cinder.notifier.list_notifier', + list_notifier_drivers=['cinder.notifier.no_op_notifier', + 'cinder.notifier.logo_notifier', + 'fdsjgsdfhjkhgsfkj']) + cinder.notifier.api.notify('publisher_id', + 'event_type', cinder.notifier.api.WARN, dict(a=3)) + self.assertEqual(self.exception_count, 2) + self.assertEqual(self.notify_count, 1) diff --git a/cinder/tests/policy.json b/cinder/tests/policy.json new file mode 100644 index 00000000000..940dedb455d --- /dev/null +++ b/cinder/tests/policy.json @@ -0,0 +1,25 @@ +{ + "volume:create": [], + "volume:get": [], + "volume:get_all": [], + "volume:get_volume_metadata": [], + "volume:delete": [], + "volume:update": [], + "volume:delete_volume_metadata": [], + "volume:update_volume_metadata": [], + "volume:attach": [], + "volume:detach": [], + "volume:reserve_volume": [], + "volume:unreserve_volume": [], + "volume:check_attach": [], + "volume:check_detach": [], + "volume:initialize_connection": [], + "volume:terminate_connection": [], + "volume:create_snapshot": [], + "volume:delete_snapshot": [], + "volume:get_snapshot": [], + "volume:get_all_snapshots": [], + + "volume_extension:types_manage": [], + "volume_extension:types_extra_specs": [] +} diff --git a/cinder/tests/rpc/__init__.py b/cinder/tests/rpc/__init__.py new file mode 100644 index 00000000000..3be5ce944ce --- /dev/null +++ b/cinder/tests/rpc/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + # NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/rpc/common.py b/cinder/tests/rpc/common.py new file mode 100644 index 00000000000..4de1be14419 --- /dev/null +++ b/cinder/tests/rpc/common.py @@ -0,0 +1,239 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls shared between all implementations +""" + +import time + +from eventlet import greenthread +import nose + +from cinder import context +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.rpc import amqp as rpc_amqp +from cinder.rpc import common as rpc_common +from cinder import test + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class BaseRpcTestCase(test.TestCase): + def setUp(self, supports_timeouts=True): + super(BaseRpcTestCase, self).setUp() + self.conn = self.rpc.create_connection(FLAGS, True) + self.receiver = TestReceiver() + self.conn.create_consumer('test', self.receiver, False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + self.supports_timeouts = supports_timeouts + + def tearDown(self): + self.conn.close() + super(BaseRpcTestCase, self).tearDown() + + def test_call_succeed(self): + value = 42 + result = self.rpc.call(FLAGS, self.context, 'test', + {"method": "echo", "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = self.rpc.call(FLAGS, self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = self.rpc.multicall(FLAGS, self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + + def test_multicall_three_nones(self): + value = 42 + result = self.rpc.multicall(FLAGS, self.context, + 'test', + {"method": "multicall_three_nones", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(x, None) + # i should have been 0, 1, and finally 2: + self.assertEqual(i, 2) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = self.rpc.multicall(FLAGS, self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = self.rpc.call(FLAGS, self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def
test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO(comstud): + # so, it will replay the context and use the same REQID? + # that's bizarre. + ret = self.rpc.call(FLAGS, context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = self.rpc.create_connection(FLAGS, True) + conn.create_consumer('nested', nested, False) + conn.consume_in_thread() + value = 42 + result = self.rpc.call(FLAGS, self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + def test_call_timeout(self): + """Make sure rpc.call will time out""" + if not self.supports_timeouts: + raise nose.SkipTest(_("RPC backend does not support timeouts")) + + value = 42 + self.assertRaises(rpc_common.Timeout, + self.rpc.call, + FLAGS, self.context, + 'test', + {"method": "block", + "args": {"value": value}}, timeout=1) + try: + self.rpc.call(FLAGS, self.context, + 'test', + {"method": "block", + "args": {"value": value}}, + timeout=1) + self.fail("should have thrown Timeout") + except rpc_common.Timeout as exc: + pass + + +class BaseRpcAMQPTestCase(BaseRpcTestCase): + """Base test class for all AMQP-based RPC tests""" + def test_proxycallback_handles_exceptions(self): + """Make sure exceptions unpacking messages don't cause hangs.""" + orig_unpack = rpc_amqp.unpack_context + + info = {'unpacked': False} + + def fake_unpack_context(*args, **kwargs): + info['unpacked'] = True + raise test.TestingException('moo') + + self.stubs.Set(rpc_amqp, 'unpack_context', fake_unpack_context) + + value = 41 + self.rpc.cast(FLAGS, self.context, 'test', + {"method": "echo", "args": {"value": value}}) + + # Wait for the cast to complete. + for x in xrange(50): + if info['unpacked']: + break + greenthread.sleep(0.1) + else: + self.fail("Timeout waiting for message to be consumed") + + # Now see if we get a response even though we raised an + # exception for the cast above. + self.stubs.Set(rpc_amqp, 'unpack_context', orig_unpack) + + value = 42 + result = self.rpc.call(FLAGS, self.context, 'test', + {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state.
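+ Each method takes (context, value), the signature rpc dispatch passes in.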
+ + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def multicall_three_nones(context, value): + yield None + yield None + yield None + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise NotImplementedError(value) + + @staticmethod + def fail_converted(context, value): + """Raises an exception with the value sent in.""" + raise exception.ConvertedException(explanation=value) + + @staticmethod + def block(context, value): + time.sleep(2) diff --git a/cinder/tests/rpc/test_common.py b/cinder/tests/rpc/test_common.py new file mode 100644 index 00000000000..5fd257a6b6e --- /dev/null +++ b/cinder/tests/rpc/test_common.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for 'common' functions used through rpc code.
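+Covers serialization and deserialization of remote exceptions.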
+""" + +import json +import sys + +from cinder import context +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import test +from cinder.rpc import amqp as rpc_amqp +from cinder.rpc import common as rpc_common +from cinder.tests.rpc import common + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +def raise_exception(): + raise Exception("test") + + +class FakeUserDefinedException(Exception): + def __init__(self): + Exception.__init__(self, "Test Message") + + +class RpcCommonTestCase(test.TestCase): + def test_serialize_remote_exception(self): + expected = { + 'class': 'Exception', + 'module': 'exceptions', + 'message': 'test', + } + + try: + raise_exception() + except Exception as exc: + failure = rpc_common.serialize_remote_exception(sys.exc_info()) + + failure = json.loads(failure) + #assure the traceback was added + self.assertEqual(expected['class'], failure['class']) + self.assertEqual(expected['module'], failure['module']) + self.assertEqual(expected['message'], failure['message']) + + def test_serialize_remote_cinder_exception(self): + def raise_cinder_exception(): + raise exception.CinderException("test", code=500) + + expected = { + 'class': 'CinderException', + 'module': 'cinder.exception', + 'kwargs': {'code': 500}, + 'message': 'test' + } + + try: + raise_cinder_exception() + except Exception as exc: + failure = rpc_common.serialize_remote_exception(sys.exc_info()) + + failure = json.loads(failure) + #assure the traceback was added + self.assertEqual(expected['class'], failure['class']) + self.assertEqual(expected['module'], failure['module']) + self.assertEqual(expected['kwargs'], failure['kwargs']) + self.assertEqual(expected['message'], failure['message']) + + def test_deserialize_remote_exception(self): + failure = { + 'class': 'CinderException', + 'module': 'cinder.exception', + 'message': 'test message', + 'tb': ['raise CinderException'], + } + serialized = json.dumps(failure) + + after_exc = rpc_common.deserialize_remote_exception(FLAGS, serialized) + self.assertTrue(isinstance(after_exc, exception.CinderException)) + self.assertTrue('test message' in unicode(after_exc)) + #assure the traceback was added + self.assertTrue('raise CinderException' in unicode(after_exc)) + + def test_deserialize_remote_exception_bad_module(self): + failure = { + 'class': 'popen2', + 'module': 'os', + 'kwargs': {'cmd': '/bin/echo failed'}, + 'message': 'foo', + } + serialized = json.dumps(failure) + + after_exc = rpc_common.deserialize_remote_exception(FLAGS, serialized) + self.assertTrue(isinstance(after_exc, rpc_common.RemoteError)) + + def test_deserialize_remote_exception_user_defined_exception(self): + """Ensure a user defined exception can be deserialized.""" + self.flags(allowed_rpc_exception_modules=[self.__class__.__module__]) + failure = { + 'class': 'FakeUserDefinedException', + 'module': self.__class__.__module__, + 'tb': ['raise FakeUserDefinedException'], + } + serialized = json.dumps(failure) + + after_exc = rpc_common.deserialize_remote_exception(FLAGS, serialized) + self.assertTrue(isinstance(after_exc, FakeUserDefinedException)) + #assure the traceback was added + self.assertTrue('raise FakeUserDefinedException' in unicode(after_exc)) + + def test_deserialize_remote_exception_cannot_recreate(self): + """Ensure a RemoteError is returned on initialization failure. + + If an exception cannot be recreated with it's original class then a + RemoteError with the exception informations should still be returned. 
+ + """ + self.flags(allowed_rpc_exception_modules=[self.__class__.__module__]) + failure = { + 'class': 'FakeIDontExistException', + 'module': self.__class__.__module__, + 'tb': ['raise FakeIDontExistException'], + } + serialized = json.dumps(failure) + + after_exc = rpc_common.deserialize_remote_exception(FLAGS, serialized) + self.assertTrue(isinstance(after_exc, rpc_common.RemoteError)) + # ensure the traceback was added + self.assertTrue('raise FakeIDontExistException' in unicode(after_exc)) diff --git a/cinder/tests/rpc/test_fake.py b/cinder/tests/rpc/test_fake.py new file mode 100644 index 00000000000..4f6722ab300 --- /dev/null +++ b/cinder/tests/rpc/test_fake.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls using fake_impl +""" + +from cinder import log as logging +from cinder.rpc import impl_fake +from cinder.tests.rpc import common + + +LOG = logging.getLogger(__name__) + + +class RpcFakeTestCase(common.BaseRpcTestCase): + def setUp(self): + self.rpc = impl_fake + super(RpcFakeTestCase, self).setUp() diff --git a/cinder/tests/rpc/test_kombu.py b/cinder/tests/rpc/test_kombu.py new file mode 100644 index 00000000000..3ce5337c69f --- /dev/null +++ b/cinder/tests/rpc/test_kombu.py @@ -0,0 +1,350 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+""" +Unit Tests for remote procedure calls using kombu +""" + +from cinder import context +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import test +from cinder.rpc import amqp as rpc_amqp +from cinder.rpc import impl_kombu +from cinder.tests.rpc import common + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class MyException(Exception): + pass + + +def _raise_exc_stub(stubs, times, obj, method, exc_msg, + exc_class=MyException): + info = {'called': 0} + orig_method = getattr(obj, method) + + def _raise_stub(*args, **kwargs): + info['called'] += 1 + if info['called'] <= times: + raise exc_class(exc_msg) + orig_method(*args, **kwargs) + stubs.Set(obj, method, _raise_stub) + return info + + +class RpcKombuTestCase(common.BaseRpcAMQPTestCase): + def setUp(self): + self.rpc = impl_kombu + impl_kombu.register_opts(FLAGS) + super(RpcKombuTestCase, self).setUp() + + def tearDown(self): + impl_kombu.cleanup() + super(RpcKombuTestCase, self).tearDown() + + def test_reusing_connection(self): + """Test that reusing a connection returns same one.""" + conn_context = self.rpc.create_connection(FLAGS, new=False) + conn1 = conn_context.connection + conn_context.close() + conn_context = self.rpc.create_connection(FLAGS, new=False) + conn2 = conn_context.connection + conn_context.close() + self.assertEqual(conn1, conn2) + + def test_topic_send_receive(self): + """Test sending to a topic exchange/queue""" + + conn = self.rpc.create_connection(FLAGS) + message = 'topic test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_topic_consumer('a_topic', _callback) + conn.topic_send('a_topic', message) + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + + def test_direct_send_receive(self): + """Test sending to a direct exchange/queue""" + conn = self.rpc.create_connection(FLAGS) + message = 'direct test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_direct_consumer('a_direct', _callback) + conn.direct_send('a_direct', message) + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + + def test_cast_interface_uses_default_options(self): + """Test kombu rpc.cast""" + + ctxt = context.RequestContext('fake_user', 'fake_project') + + class MyConnection(impl_kombu.Connection): + def __init__(myself, *args, **kwargs): + super(MyConnection, myself).__init__(*args, **kwargs) + self.assertEqual(myself.params, + {'hostname': FLAGS.rabbit_host, + 'userid': FLAGS.rabbit_userid, + 'password': FLAGS.rabbit_password, + 'port': FLAGS.rabbit_port, + 'virtual_host': FLAGS.rabbit_virtual_host, + 'transport': 'memory'}) + + def topic_send(_context, topic, msg): + pass + + MyConnection.pool = rpc_amqp.Pool(FLAGS, MyConnection) + self.stubs.Set(impl_kombu, 'Connection', MyConnection) + + impl_kombu.cast(FLAGS, ctxt, 'fake_topic', {'msg': 'fake'}) + + def test_cast_to_server_uses_server_params(self): + """Test kombu rpc.cast""" + + ctxt = context.RequestContext('fake_user', 'fake_project') + + server_params = {'username': 'fake_username', + 'password': 'fake_password', + 'hostname': 'fake_hostname', + 'port': 31337, + 'virtual_host': 'fake_virtual_host'} + + class MyConnection(impl_kombu.Connection): + def __init__(myself, *args, **kwargs): + super(MyConnection, myself).__init__(*args, **kwargs) + self.assertEqual(myself.params, + {'hostname': 
server_params['hostname'], + 'userid': server_params['username'], + 'password': server_params['password'], + 'port': server_params['port'], + 'virtual_host': server_params['virtual_host'], + 'transport': 'memory'}) + + def topic_send(_context, topic, msg): + pass + + MyConnection.pool = rpc_amqp.Pool(FLAGS, MyConnection) + self.stubs.Set(impl_kombu, 'Connection', MyConnection) + + impl_kombu.cast_to_server(FLAGS, ctxt, server_params, + 'fake_topic', {'msg': 'fake'}) + + @test.skip_test("kombu memory transport seems buggy with fanout queues " + "as this test passes when you use rabbit (fake_rabbit=False)") + def test_fanout_send_receive(self): + """Test sending to a fanout exchange and consuming from 2 queues""" + + conn = self.rpc.create_connection() + conn2 = self.rpc.create_connection() + message = 'fanout test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_fanout_consumer('a_fanout', _callback) + conn2.declare_fanout_consumer('a_fanout', _callback) + conn.fanout_send('a_fanout', message) + + conn.consume(limit=1) + conn.close() + self.assertEqual(self.received_message, message) + + self.received_message = None + conn2.consume(limit=1) + conn2.close() + self.assertEqual(self.received_message, message) + + def test_declare_consumer_errors_will_reconnect(self): + # Test that any exception with 'timeout' in it causes a + # reconnection + info = _raise_exc_stub(self.stubs, 2, self.rpc.DirectConsumer, + '__init__', 'foo timeout foo') + + conn = self.rpc.Connection(FLAGS) + result = conn.declare_consumer(self.rpc.DirectConsumer, + 'test_topic', None) + + self.assertEqual(info['called'], 3) + self.assertTrue(isinstance(result, self.rpc.DirectConsumer)) + + # Test that any exception in transport.connection_errors causes + # a reconnection + self.stubs.UnsetAll() + + info = _raise_exc_stub(self.stubs, 1, self.rpc.DirectConsumer, + '__init__', 'meow') + + conn = self.rpc.Connection(FLAGS) + conn.connection_errors = (MyException, ) + + result = conn.declare_consumer(self.rpc.DirectConsumer, + 'test_topic', None) + + self.assertEqual(info['called'], 2) + self.assertTrue(isinstance(result, self.rpc.DirectConsumer)) + + def test_declare_consumer_ioerrors_will_reconnect(self): + """Test that an IOError exception causes a reconnection""" + info = _raise_exc_stub(self.stubs, 2, self.rpc.DirectConsumer, + '__init__', 'Socket closed', exc_class=IOError) + + conn = self.rpc.Connection(FLAGS) + result = conn.declare_consumer(self.rpc.DirectConsumer, + 'test_topic', None) + + self.assertEqual(info['called'], 3) + self.assertTrue(isinstance(result, self.rpc.DirectConsumer)) + + def test_publishing_errors_will_reconnect(self): + # Test that any exception with 'timeout' in it causes a + # reconnection when declaring the publisher class and when + # calling send() + info = _raise_exc_stub(self.stubs, 2, self.rpc.DirectPublisher, + '__init__', 'foo timeout foo') + + conn = self.rpc.Connection(FLAGS) + conn.publisher_send(self.rpc.DirectPublisher, 'test_topic', 'msg') + + self.assertEqual(info['called'], 3) + self.stubs.UnsetAll() + + info = _raise_exc_stub(self.stubs, 2, self.rpc.DirectPublisher, + 'send', 'foo timeout foo') + + conn = self.rpc.Connection(FLAGS) + conn.publisher_send(self.rpc.DirectPublisher, 'test_topic', 'msg') + + self.assertEqual(info['called'], 3) + + # Test that any exception in transport.connection_errors causes + # a reconnection when declaring the publisher class and when + # calling send() + self.stubs.UnsetAll() + 
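+ # The stub fails only the first attempt here, so 'called' should end at 2.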
+ info = _raise_exc_stub(self.stubs, 1, self.rpc.DirectPublisher, + '__init__', 'meow') + + conn = self.rpc.Connection(FLAGS) + conn.connection_errors = (MyException, ) + + conn.publisher_send(self.rpc.DirectPublisher, 'test_topic', 'msg') + + self.assertEqual(info['called'], 2) + self.stubs.UnsetAll() + + info = _raise_exc_stub(self.stubs, 1, self.rpc.DirectPublisher, + 'send', 'meow') + + conn = self.rpc.Connection(FLAGS) + conn.connection_errors = (MyException, ) + + conn.publisher_send(self.rpc.DirectPublisher, 'test_topic', 'msg') + + self.assertEqual(info['called'], 2) + + @test.skip_test("kombu memory transport hangs here on precise") + def test_iterconsume_errors_will_reconnect(self): + conn = self.rpc.Connection(FLAGS) + message = 'reconnect test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_direct_consumer('a_direct', _callback) + conn.direct_send('a_direct', message) + + info = _raise_exc_stub(self.stubs, 1, conn.connection, + 'drain_events', 'foo timeout foo') + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + # Only called once, because our stub goes away during reconnection + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call raises the Exception locally. The value of the + exception is preserved in the message. + + """ + self.flags(allowed_rpc_exception_modules=['exceptions']) + value = "This is the exception message" + self.assertRaises(NotImplementedError, + self.rpc.call, + FLAGS, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + self.rpc.call(FLAGS, self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown Exception") + except NotImplementedError as exc: + self.assertTrue(value in unicode(exc)) + # Traceback should be included in the exception message + self.assertTrue('raise NotImplementedError(value)' in unicode(exc)) + + def test_call_converted_exception(self): + """Test that exception gets passed back properly. + + rpc.call raises the Exception locally. The value of the + exception is preserved in the message. + + """ + value = "This is the exception message" + self.assertRaises(exception.ConvertedException, + self.rpc.call, + FLAGS, + self.context, + 'test', + {"method": "fail_converted", + "args": {"value": value}}) + try: + self.rpc.call(FLAGS, self.context, + 'test', + {"method": "fail_converted", + "args": {"value": value}}) + self.fail("should have thrown Exception") + except exception.ConvertedException as exc: + self.assertTrue(value in unicode(exc)) + # Traceback should be included in the exception message + self.assertTrue('exception.ConvertedException' in unicode(exc)) diff --git a/cinder/tests/rpc/test_kombu_ssl.py b/cinder/tests/rpc/test_kombu_ssl.py new file mode 100644 index 00000000000..b1addec929f --- /dev/null +++ b/cinder/tests/rpc/test_kombu_ssl.py @@ -0,0 +1,58 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls using kombu + ssl +""" + +from cinder import flags +from cinder import test +from cinder.rpc import impl_kombu + +# Flag settings we will ensure get passed to amqplib +SSL_VERSION = "SSLv2" +SSL_CERT = "/tmp/cert.blah.blah" +SSL_CA_CERT = "/tmp/cert.ca.blah.blah" +SSL_KEYFILE = "/tmp/keyfile.blah.blah" + +FLAGS = flags.FLAGS + + +class RpcKombuSslTestCase(test.TestCase): + + def setUp(self): + super(RpcKombuSslTestCase, self).setUp() + impl_kombu.register_opts(FLAGS) + self.flags(kombu_ssl_keyfile=SSL_KEYFILE, + kombu_ssl_ca_certs=SSL_CA_CERT, + kombu_ssl_certfile=SSL_CERT, + kombu_ssl_version=SSL_VERSION, + rabbit_use_ssl=True) + + def test_ssl_on_extended(self): + rpc = impl_kombu + conn = rpc.create_connection(FLAGS, True) + c = conn.connection + # This might be kombu version dependent, + # since we are now peeking into the internals of kombu. + self.assertTrue(isinstance(c.connection.ssl, dict)) + self.assertEqual(SSL_VERSION, c.connection.ssl.get("ssl_version")) + self.assertEqual(SSL_CERT, c.connection.ssl.get("certfile")) + self.assertEqual(SSL_CA_CERT, c.connection.ssl.get("ca_certs")) + self.assertEqual(SSL_KEYFILE, c.connection.ssl.get("keyfile")) + # That dict then goes into amqplib, which passes it on + # into Python's ssl setup. diff --git a/cinder/tests/rpc/test_qpid.py b/cinder/tests/rpc/test_qpid.py new file mode 100644 index 00000000000..4ec86516908 --- /dev/null +++ b/cinder/tests/rpc/test_qpid.py @@ -0,0 +1,340 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2012, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls using qpid +""" + +import mox + +from cinder import context +from cinder import flags +from cinder import log as logging +from cinder.rpc import amqp as rpc_amqp +from cinder import test + +try: + import qpid + from cinder.rpc import impl_qpid +except ImportError: + qpid = None + impl_qpid = None + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class RpcQpidTestCase(test.TestCase): + """ + Exercise the public API of impl_qpid utilizing mox. + + This set of tests utilizes mox to replace the Qpid objects and ensures + that the right operations happen on them when the various public rpc API + calls are exercised.
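No live broker is involved: setUp() replaces the qpid.messaging classes with mocks.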
The API calls tested here include: + + cinder.rpc.create_connection() + cinder.rpc.common.Connection.create_consumer() + cinder.rpc.common.Connection.close() + cinder.rpc.cast() + cinder.rpc.fanout_cast() + cinder.rpc.call() + cinder.rpc.multicall() + """ + + def setUp(self): + super(RpcQpidTestCase, self).setUp() + + self.mock_connection = None + self.mock_session = None + self.mock_sender = None + self.mock_receiver = None + + if qpid: + impl_qpid.register_opts(FLAGS) + self.orig_connection = qpid.messaging.Connection + self.orig_session = qpid.messaging.Session + self.orig_sender = qpid.messaging.Sender + self.orig_receiver = qpid.messaging.Receiver + qpid.messaging.Connection = lambda *_x, **_y: self.mock_connection + qpid.messaging.Session = lambda *_x, **_y: self.mock_session + qpid.messaging.Sender = lambda *_x, **_y: self.mock_sender + qpid.messaging.Receiver = lambda *_x, **_y: self.mock_receiver + + def tearDown(self): + if qpid: + qpid.messaging.Connection = self.orig_connection + qpid.messaging.Session = self.orig_session + qpid.messaging.Sender = self.orig_sender + qpid.messaging.Receiver = self.orig_receiver + if impl_qpid: + # Need to reset this in case we changed the connection_cls + # in self._setup_to_server_tests() + impl_qpid.Connection.pool.connection_cls = impl_qpid.Connection + + super(RpcQpidTestCase, self).tearDown() + + @test.skip_if(qpid is None, "Test requires qpid") + def test_create_connection(self): + self.mock_connection = self.mox.CreateMock(self.orig_connection) + self.mock_session = self.mox.CreateMock(self.orig_session) + + self.mock_connection.opened().AndReturn(False) + self.mock_connection.open() + self.mock_connection.session().AndReturn(self.mock_session) + self.mock_connection.close() + + self.mox.ReplayAll() + + connection = impl_qpid.create_connection(FLAGS) + connection.close() + + def _test_create_consumer(self, fanout): + self.mock_connection = self.mox.CreateMock(self.orig_connection) + self.mock_session = self.mox.CreateMock(self.orig_session) + self.mock_receiver = self.mox.CreateMock(self.orig_receiver) + + self.mock_connection.opened().AndReturn(False) + self.mock_connection.open() + self.mock_connection.session().AndReturn(self.mock_session) + if fanout: + # The link name includes a UUID, so match it with a regex. 
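+ # (qpid address strings embed the node and link declare options as JSON)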
+ expected_address = mox.Regex(r'^impl_qpid_test_fanout ; ' + '{"node": {"x-declare": {"auto-delete": true, "durable": ' + 'false, "type": "fanout"}, "type": "topic"}, "create": ' + '"always", "link": {"x-declare": {"auto-delete": true, ' + '"exclusive": true, "durable": false}, "durable": true, ' + '"name": "impl_qpid_test_fanout_.*"}}$') + else: + expected_address = ( + 'cinder/impl_qpid_test ; {"node": {"x-declare": ' + '{"auto-delete": true, "durable": true}, "type": "topic"}, ' + '"create": "always", "link": {"x-declare": {"auto-delete": ' + 'true, "exclusive": false, "durable": false}, "durable": ' + 'true, "name": "impl_qpid_test"}}') + self.mock_session.receiver(expected_address).AndReturn( + self.mock_receiver) + self.mock_receiver.capacity = 1 + self.mock_connection.close() + + self.mox.ReplayAll() + + connection = impl_qpid.create_connection(FLAGS) + connection.create_consumer("impl_qpid_test", + lambda *_x, **_y: None, + fanout) + connection.close() + + @test.skip_if(qpid is None, "Test requires qpid") + def test_create_consumer(self): + self._test_create_consumer(fanout=False) + + @test.skip_if(qpid is None, "Test requires qpid") + def test_create_consumer_fanout(self): + self._test_create_consumer(fanout=True) + + def _test_cast(self, fanout, server_params=None): + self.mock_connection = self.mox.CreateMock(self.orig_connection) + self.mock_session = self.mox.CreateMock(self.orig_session) + self.mock_sender = self.mox.CreateMock(self.orig_sender) + + self.mock_connection.opened().AndReturn(False) + self.mock_connection.open() + + self.mock_connection.session().AndReturn(self.mock_session) + if fanout: + expected_address = ('impl_qpid_test_fanout ; ' + '{"node": {"x-declare": {"auto-delete": true, ' + '"durable": false, "type": "fanout"}, ' + '"type": "topic"}, "create": "always"}') + else: + expected_address = ( + 'cinder/impl_qpid_test ; {"node": {"x-declare": ' + '{"auto-delete": true, "durable": false}, "type": "topic"}, ' + '"create": "always"}') + self.mock_session.sender(expected_address).AndReturn(self.mock_sender) + self.mock_sender.send(mox.IgnoreArg()) + if not server_params: + # This is a pooled connection, so instead of closing it, it + # gets reset, which is just creating a new session on the + # connection. + self.mock_session.close() + self.mock_connection.session().AndReturn(self.mock_session) + + self.mox.ReplayAll() + + try: + ctx = context.RequestContext("user", "project") + + args = [FLAGS, ctx, "impl_qpid_test", + {"method": "test_method", "args": {}}] + + if server_params: + args.insert(2, server_params) + if fanout: + method = impl_qpid.fanout_cast_to_server + else: + method = impl_qpid.cast_to_server + else: + if fanout: + method = impl_qpid.fanout_cast + else: + method = impl_qpid.cast + + method(*args) + finally: + while impl_qpid.Connection.pool.free_items: + # Pull the mock connection object out of the connection pool so + # that it doesn't mess up other test cases. 
+ impl_qpid.Connection.pool.get() + + @test.skip_if(qpid is None, "Test requires qpid") + def test_cast(self): + self._test_cast(fanout=False) + + @test.skip_if(qpid is None, "Test requires qpid") + def test_fanout_cast(self): + self._test_cast(fanout=True) + + def _setup_to_server_tests(self, server_params): + class MyConnection(impl_qpid.Connection): + def __init__(myself, *args, **kwargs): + super(MyConnection, myself).__init__(*args, **kwargs) + self.assertEqual(myself.connection.username, + server_params['username']) + self.assertEqual(myself.connection.password, + server_params['password']) + self.assertEqual(myself.broker, + server_params['hostname'] + ':' + + str(server_params['port'])) + + MyConnection.pool = rpc_amqp.Pool(FLAGS, MyConnection) + self.stubs.Set(impl_qpid, 'Connection', MyConnection) + + @test.skip_if(qpid is None, "Test requires qpid") + def test_cast_to_server(self): + server_params = {'username': 'fake_username', + 'password': 'fake_password', + 'hostname': 'fake_hostname', + 'port': 31337} + self._setup_to_server_tests(server_params) + self._test_cast(fanout=False, server_params=server_params) + + @test.skip_if(qpid is None, "Test requires qpid") + def test_fanout_cast_to_server(self): + server_params = {'username': 'fake_username', + 'password': 'fake_password', + 'hostname': 'fake_hostname', + 'port': 31337} + self._setup_to_server_tests(server_params) + self._test_cast(fanout=True, server_params=server_params) + + def _test_call(self, multi): + self.mock_connection = self.mox.CreateMock(self.orig_connection) + self.mock_session = self.mox.CreateMock(self.orig_session) + self.mock_sender = self.mox.CreateMock(self.orig_sender) + self.mock_receiver = self.mox.CreateMock(self.orig_receiver) + + self.mock_connection.opened().AndReturn(False) + self.mock_connection.open() + self.mock_connection.session().AndReturn(self.mock_session) + rcv_addr = mox.Regex(r'^.*/.* ; {"node": {"x-declare": {"auto-delete":' + ' true, "durable": true, "type": "direct"}, "type": ' + '"topic"}, "create": "always", "link": {"x-declare": ' + '{"auto-delete": true, "exclusive": true, "durable": ' + 'false}, "durable": true, "name": ".*"}}') + self.mock_session.receiver(rcv_addr).AndReturn(self.mock_receiver) + self.mock_receiver.capacity = 1 + send_addr = ('cinder/impl_qpid_test ; {"node": {"x-declare": ' + '{"auto-delete": true, "durable": false}, "type": "topic"}, ' + '"create": "always"}') + self.mock_session.sender(send_addr).AndReturn(self.mock_sender) + self.mock_sender.send(mox.IgnoreArg()) + + self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn( + self.mock_receiver) + self.mock_receiver.fetch().AndReturn(qpid.messaging.Message( + {"result": "foo", "failure": False, "ending": False})) + if multi: + self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn( + self.mock_receiver) + self.mock_receiver.fetch().AndReturn( + qpid.messaging.Message( + {"result": "bar", "failure": False, + "ending": False})) + self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn( + self.mock_receiver) + self.mock_receiver.fetch().AndReturn( + qpid.messaging.Message( + {"result": "baz", "failure": False, + "ending": False})) + self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn( + self.mock_receiver) + self.mock_receiver.fetch().AndReturn(qpid.messaging.Message( + {"failure": False, "ending": True})) + self.mock_session.close() + self.mock_connection.session().AndReturn(self.mock_session) + + self.mox.ReplayAll() + + try: + ctx = context.RequestContext("user", 
"project") + + if multi: + method = impl_qpid.multicall + else: + method = impl_qpid.call + + res = method(FLAGS, ctx, "impl_qpid_test", + {"method": "test_method", "args": {}}) + + if multi: + self.assertEquals(list(res), ["foo", "bar", "baz"]) + else: + self.assertEquals(res, "foo") + finally: + while impl_qpid.Connection.pool.free_items: + # Pull the mock connection object out of the connection pool so + # that it doesn't mess up other test cases. + impl_qpid.Connection.pool.get() + + @test.skip_if(qpid is None, "Test requires qpid") + def test_call(self): + self._test_call(multi=False) + + @test.skip_if(qpid is None, "Test requires qpid") + def test_multicall(self): + self._test_call(multi=True) + + +# +#from cinder.tests.rpc import common +# +# Qpid does not have a handy in-memory transport like kombu, so it's not +# terribly straight forward to take advantage of the common unit tests. +# However, at least at the time of this writing, the common unit tests all pass +# with qpidd running. +# +# class RpcQpidCommonTestCase(common._BaseRpcTestCase): +# def setUp(self): +# self.rpc = impl_qpid +# super(RpcQpidCommonTestCase, self).setUp() +# +# def tearDown(self): +# super(RpcQpidCommonTestCase, self).tearDown() +# diff --git a/cinder/tests/runtime_flags.py b/cinder/tests/runtime_flags.py new file mode 100644 index 00000000000..4327561fc5e --- /dev/null +++ b/cinder/tests/runtime_flags.py @@ -0,0 +1,23 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import flags +from cinder.openstack.common import cfg + +FLAGS = flags.FLAGS +FLAGS.register_opt(cfg.IntOpt('runtime_answer', default=54, help='test flag')) diff --git a/cinder/tests/scheduler/__init__.py b/cinder/tests/scheduler/__init__.py new file mode 100644 index 00000000000..3be5ce944ce --- /dev/null +++ b/cinder/tests/scheduler/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from cinder.tests import * diff --git a/cinder/tests/scheduler/fakes.py b/cinder/tests/scheduler/fakes.py new file mode 100644 index 00000000000..384dae7ba91 --- /dev/null +++ b/cinder/tests/scheduler/fakes.py @@ -0,0 +1,62 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Fakes For Scheduler tests. +""" + +import mox + +from cinder import db +from cinder.scheduler import host_manager + + +class FakeHostManager(host_manager.HostManager): + """host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0 + host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536 + host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072 + host4: free_ram_mb=8192 free_disk_gb=8192""" + + def __init__(self): + super(FakeHostManager, self).__init__() + + self.service_states = { + 'host1': { + 'compute': {'host_memory_free': 1073741824}, + }, + 'host2': { + 'compute': {'host_memory_free': 2147483648}, + }, + 'host3': { + 'compute': {'host_memory_free': 3221225472}, + }, + 'host4': { + 'compute': {'host_memory_free': 999999999}, + }, + } + + def get_host_list_from_db(self, context): + return [ + ('host1', dict(free_disk_gb=1024, free_ram_mb=1024)), + ('host2', dict(free_disk_gb=2048, free_ram_mb=2048)), + ('host3', dict(free_disk_gb=4096, free_ram_mb=4096)), + ('host4', dict(free_disk_gb=8192, free_ram_mb=8192)), + ] + + +class FakeHostState(host_manager.HostState): + def __init__(self, host, topic, attribute_dict): + super(FakeHostState, self).__init__(host, topic) + for (key, val) in attribute_dict.iteritems(): + setattr(self, key, val) diff --git a/cinder/tests/scheduler/test_scheduler.py b/cinder/tests/scheduler/test_scheduler.py new file mode 100644 index 00000000000..346132a7d48 --- /dev/null +++ b/cinder/tests/scheduler/test_scheduler.py @@ -0,0 +1,322 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Tests For Scheduler +""" + +import datetime +import json + +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder.notifier import api as notifier +from cinder import rpc +from cinder.rpc import common as rpc_common +from cinder.scheduler import driver +from cinder.scheduler import manager +from cinder import test +from cinder.tests.scheduler import fakes +from cinder import utils + +FLAGS = flags.FLAGS + + +class SchedulerManagerTestCase(test.TestCase): + """Test case for scheduler manager""" + + manager_cls = manager.SchedulerManager + driver_cls = driver.Scheduler + driver_cls_name = 'cinder.scheduler.driver.Scheduler' + + class AnException(Exception): + pass + + def setUp(self): + super(SchedulerManagerTestCase, self).setUp() + self.flags(scheduler_driver=self.driver_cls_name) + self.manager = self.manager_cls() + self.context = context.RequestContext('fake_user', 'fake_project') + self.topic = 'fake_topic' + self.fake_args = (1, 2, 3) + self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'} + + def test_1_correct_init(self): + # Correct scheduler driver + manager = self.manager + self.assertTrue(isinstance(manager.driver, self.driver_cls)) + + def test_get_host_list(self): + expected = 'fake_hosts' + + self.mox.StubOutWithMock(self.manager.driver, 'get_host_list') + self.manager.driver.get_host_list().AndReturn(expected) + + self.mox.ReplayAll() + result = self.manager.get_host_list(self.context) + self.assertEqual(result, expected) + + def test_get_service_capabilities(self): + expected = 'fake_service_capabs' + + self.mox.StubOutWithMock(self.manager.driver, + 'get_service_capabilities') + self.manager.driver.get_service_capabilities().AndReturn( + expected) + + self.mox.ReplayAll() + result = self.manager.get_service_capabilities(self.context) + self.assertEqual(result, expected) + + def test_update_service_capabilities(self): + service_name = 'fake_service' + host = 'fake_host' + + self.mox.StubOutWithMock(self.manager.driver, + 'update_service_capabilities') + + # Test no capabilities passes empty dictionary + self.manager.driver.update_service_capabilities(service_name, + host, {}) + self.mox.ReplayAll() + result = self.manager.update_service_capabilities(self.context, + service_name=service_name, host=host) + self.mox.VerifyAll() + + self.mox.ResetAll() + # Test capabilities passes correctly + capabilities = {'fake_capability': 'fake_value'} + self.manager.driver.update_service_capabilities( + service_name, host, capabilities) + self.mox.ReplayAll() + result = self.manager.update_service_capabilities(self.context, + service_name=service_name, host=host, + capabilities=capabilities) + + def test_existing_method(self): + def stub_method(self, *args, **kwargs): + pass + setattr(self.manager.driver, 'schedule_stub_method', stub_method) + + self.mox.StubOutWithMock(self.manager.driver, + 'schedule_stub_method') + self.manager.driver.schedule_stub_method(self.context, + *self.fake_args, **self.fake_kwargs) + + self.mox.ReplayAll() + self.manager.stub_method(self.context, self.topic, + *self.fake_args, **self.fake_kwargs) + + def test_missing_method_fallback(self): + self.mox.StubOutWithMock(self.manager.driver, 'schedule') + self.manager.driver.schedule(self.context, self.topic, + 'noexist', *self.fake_args, **self.fake_kwargs) + + self.mox.ReplayAll() + self.manager.noexist(self.context, self.topic, + *self.fake_args, **self.fake_kwargs) + + def _mox_schedule_method_helper(self, method_name): + # Make sure the method 
we're going to call exists on the driver
+        def stub_method(*args, **kwargs):
+            pass
+
+        setattr(self.manager.driver, method_name, stub_method)
+
+        self.mox.StubOutWithMock(self.manager.driver,
+                                 method_name)
+
+
+class SchedulerTestCase(test.TestCase):
+    """Test case for base scheduler driver class"""
+
+    # So we can subclass this test and re-use tests if we need.
+    driver_cls = driver.Scheduler
+
+    def setUp(self):
+        super(SchedulerTestCase, self).setUp()
+        self.driver = self.driver_cls()
+        self.context = context.RequestContext('fake_user', 'fake_project')
+        self.topic = 'fake_topic'
+
+    def test_get_host_list(self):
+        expected = 'fake_hosts'
+
+        self.mox.StubOutWithMock(self.driver.host_manager, 'get_host_list')
+        self.driver.host_manager.get_host_list().AndReturn(expected)
+
+        self.mox.ReplayAll()
+        result = self.driver.get_host_list()
+        self.assertEqual(result, expected)
+
+    def test_get_service_capabilities(self):
+        expected = 'fake_service_capabs'
+
+        self.mox.StubOutWithMock(self.driver.host_manager,
+                                 'get_service_capabilities')
+        self.driver.host_manager.get_service_capabilities().AndReturn(
+            expected)
+
+        self.mox.ReplayAll()
+        result = self.driver.get_service_capabilities()
+        self.assertEqual(result, expected)
+
+    def test_update_service_capabilities(self):
+        service_name = 'fake_service'
+        host = 'fake_host'
+
+        self.mox.StubOutWithMock(self.driver.host_manager,
+                                 'update_service_capabilities')
+
+        capabilities = {'fake_capability': 'fake_value'}
+        self.driver.host_manager.update_service_capabilities(
+            service_name, host, capabilities)
+        self.mox.ReplayAll()
+        result = self.driver.update_service_capabilities(service_name,
+                                                         host, capabilities)
+
+    def test_hosts_up(self):
+        service1 = {'host': 'host1'}
+        service2 = {'host': 'host2'}
+        services = [service1, service2]
+
+        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+        self.mox.StubOutWithMock(utils, 'service_is_up')
+
+        db.service_get_all_by_topic(self.context,
+                                    self.topic).AndReturn(services)
+        utils.service_is_up(service1).AndReturn(False)
+        utils.service_is_up(service2).AndReturn(True)
+
+        self.mox.ReplayAll()
+        result = self.driver.hosts_up(self.context, self.topic)
+        self.assertEqual(result, ['host2'])
+
+
+class SchedulerDriverBaseTestCase(SchedulerTestCase):
+    """Test cases for base scheduler driver class methods
+       that will fail if the driver is changed"""
+
+    def test_unimplemented_schedule(self):
+        fake_args = (1, 2, 3)
+        fake_kwargs = {'cat': 'meow'}
+
+        self.assertRaises(NotImplementedError, self.driver.schedule,
+                          self.context, self.topic, 'schedule_something',
+                          *fake_args, **fake_kwargs)
+
+
+class SchedulerDriverModuleTestCase(test.TestCase):
+    """Test case for scheduler driver module methods"""
+
+    def setUp(self):
+        super(SchedulerDriverModuleTestCase, self).setUp()
+        self.context = context.RequestContext('fake_user', 'fake_project')
+
+    def test_cast_to_volume_host_update_db_with_volume_id(self):
+        host = 'fake_host1'
+        method = 'fake_method'
+        fake_kwargs = {'volume_id': 31337,
+                       'extra_arg': 'meow'}
+        queue = 'fake_queue'
+
+        self.mox.StubOutWithMock(utils, 'utcnow')
+        self.mox.StubOutWithMock(db, 'volume_update')
+        self.mox.StubOutWithMock(db, 'queue_get_for')
+        self.mox.StubOutWithMock(rpc, 'cast')
+
+        utils.utcnow().AndReturn('fake-now')
+        db.volume_update(self.context, 31337,
+                         {'host': host, 'scheduled_at': 'fake-now'})
+        db.queue_get_for(self.context, 'volume', host).AndReturn(queue)
+        rpc.cast(self.context, queue,
+                 {'method': method,
+                  'args': fake_kwargs})
+
+        self.mox.ReplayAll()
+
driver.cast_to_volume_host(self.context, host, method, + update_db=True, **fake_kwargs) + + def test_cast_to_volume_host_update_db_without_volume_id(self): + host = 'fake_host1' + method = 'fake_method' + fake_kwargs = {'extra_arg': 'meow'} + queue = 'fake_queue' + + self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'cast') + + db.queue_get_for(self.context, 'volume', host).AndReturn(queue) + rpc.cast(self.context, queue, + {'method': method, + 'args': fake_kwargs}) + + self.mox.ReplayAll() + driver.cast_to_volume_host(self.context, host, method, + update_db=True, **fake_kwargs) + + def test_cast_to_volume_host_no_update_db(self): + host = 'fake_host1' + method = 'fake_method' + fake_kwargs = {'extra_arg': 'meow'} + queue = 'fake_queue' + + self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'cast') + + db.queue_get_for(self.context, 'volume', host).AndReturn(queue) + rpc.cast(self.context, queue, + {'method': method, + 'args': fake_kwargs}) + + self.mox.ReplayAll() + driver.cast_to_volume_host(self.context, host, method, + update_db=False, **fake_kwargs) + + def test_cast_to_host_volume_topic(self): + host = 'fake_host1' + method = 'fake_method' + fake_kwargs = {'extra_arg': 'meow'} + + self.mox.StubOutWithMock(driver, 'cast_to_volume_host') + driver.cast_to_volume_host(self.context, host, method, + update_db=False, **fake_kwargs) + + self.mox.ReplayAll() + driver.cast_to_host(self.context, 'volume', host, method, + update_db=False, **fake_kwargs) + + def test_cast_to_host_unknown_topic(self): + host = 'fake_host1' + method = 'fake_method' + fake_kwargs = {'extra_arg': 'meow'} + topic = 'unknown' + queue = 'fake_queue' + + self.mox.StubOutWithMock(db, 'queue_get_for') + self.mox.StubOutWithMock(rpc, 'cast') + + db.queue_get_for(self.context, topic, host).AndReturn(queue) + rpc.cast(self.context, queue, + {'method': method, + 'args': fake_kwargs}) + + self.mox.ReplayAll() + driver.cast_to_host(self.context, topic, host, method, + update_db=False, **fake_kwargs) diff --git a/cinder/tests/test_SolidFireSanISCSIDriver.py b/cinder/tests/test_SolidFireSanISCSIDriver.py new file mode 100644 index 00000000000..4a1a6012100 --- /dev/null +++ b/cinder/tests/test_SolidFireSanISCSIDriver.py @@ -0,0 +1,186 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2012 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
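
The cast_to_host tests above pin down a simple routing convention: a known topic is handed to a topic-specific helper, while an unknown topic falls back to a plain cast to the host's queue. A simplified sketch of that dispatch (not the real cinder.scheduler.driver module; the helper and rpc callables are passed in as stand-ins for the real cinder.db / cinder.rpc functions):

    def cast_to_host(topic, host, method, cast_to_volume_host,
                     queue_get_for, rpc_cast, **kwargs):
        if topic == 'volume':
            # Topic-specific helper can also update the DB first.
            cast_to_volume_host(host, method, **kwargs)
        else:
            # Unknown topic: send straight to the host's queue.
            queue = queue_get_for(topic, host)
            rpc_cast(queue, {'method': method, 'args': kwargs})

    sent = []
    cast_to_host('volume', 'host1', 'create_volume',
                 cast_to_volume_host=lambda h, m, **kw: sent.append((h, m)),
                 queue_get_for=lambda t, h: '%s.%s' % (t, h),
                 rpc_cast=lambda q, msg: sent.append((q, msg)))
    assert sent == [('host1', 'create_volume')]
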
+
+from cinder import exception
+from cinder import log as logging
+from cinder.volume import san
+from cinder import test
+
+LOG = logging.getLogger(__name__)
+
+
+class SolidFireVolumeTestCase(test.TestCase):
+    def setUp(self):
+        super(SolidFireVolumeTestCase, self).setUp()
+
+    def fake_issue_api_request(obj, method, params):
+        # NOTE: compare strings with '==', not 'is'; identity checks on
+        # string literals only work by the accident of interning.
+        if method == 'GetClusterInfo':
+            LOG.info('Called Fake GetClusterInfo...')
+            results = {'result': {'clusterInfo':
+                          {'name': 'fake-cluster',
+                           'mvip': '1.1.1.1',
+                           'svip': '1.1.1.1',
+                           'uniqueID': 'unqid',
+                           'repCount': 2,
+                           'attributes': {}}}}
+            return results
+
+        elif method == 'AddAccount':
+            LOG.info('Called Fake AddAccount...')
+            return {'result': {'accountID': 25}, 'id': 1}
+
+        elif method == 'GetAccountByName':
+            LOG.info('Called Fake GetAccountByName...')
+            results = {'result': {'account': {
+                           'accountID': 25,
+                           'username': params['username'],
+                           'status': 'active',
+                           'initiatorSecret': '123456789012',
+                           'targetSecret': '123456789012',
+                           'attributes': {},
+                           'volumes': [6, 7, 20]}},
+                       "id": 1}
+            return results
+
+        elif method == 'CreateVolume':
+            LOG.info('Called Fake CreateVolume...')
+            return {'result': {'volumeID': 5}, 'id': 1}
+
+        elif method == 'DeleteVolume':
+            LOG.info('Called Fake DeleteVolume...')
+            return {'result': {}, 'id': 1}
+
+        elif method == 'ListVolumesForAccount':
+            LOG.info('Called Fake ListVolumesForAccount...')
+            result = {'result': {'volumes': [{
+                          'volumeID': '5',
+                          'name': 'test_volume',
+                          'accountID': 25,
+                          'sliceCount': 1,
+                          'totalSize': 1048576 * 1024,
+                          'enable512e': False,
+                          'access': "readWrite",
+                          'status': "active",
+                          'attributes': None,
+                          'qos': None}]}}
+            return result
+
+        else:
+            LOG.error('Unimplemented API call in fake: %s' % method)
+
+    def fake_issue_api_request_fails(obj, method, params):
+        return {'error': {
+                    'code': 000,
+                    'name': 'DummyError',
+                    'message': 'This is a fake error response'},
+                'id': 1}
+
+    def test_create_volume(self):
+        self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request',
+                       self.fake_issue_api_request)
+        testvol = {'project_id': 'testprjid',
+                   'name': 'testvol',
+                   'size': 1}
+        sfv = san.SolidFireSanISCSIDriver()
+        model_update = sfv.create_volume(testvol)
+
+    def test_create_volume_fails(self):
+        self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request',
+                       self.fake_issue_api_request_fails)
+        testvol = {'project_id': 'testprjid',
+                   'name': 'testvol',
+                   'size': 1}
+        sfv = san.SolidFireSanISCSIDriver()
+        try:
+            sfv.create_volume(testvol)
+            self.fail("Should have thrown Error")
+        except Exception:
+            pass
+
+    def test_create_sfaccount(self):
+        sfv = san.SolidFireSanISCSIDriver()
+        self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request',
+                       self.fake_issue_api_request)
+        account = sfv._create_sfaccount('project-id')
+        self.assertNotEqual(account, None)
+
+    def test_create_sfaccount_fails(self):
+        sfv = san.SolidFireSanISCSIDriver()
+        self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request',
+                       self.fake_issue_api_request_fails)
+        account = sfv._create_sfaccount('project-id')
+        self.assertEqual(account, None)
+
+    def test_get_sfaccount_by_name(self):
+        sfv = san.SolidFireSanISCSIDriver()
+        self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request',
+                       self.fake_issue_api_request)
+        account = sfv._get_sfaccount_by_name('some-name')
+        self.assertNotEqual(account, None)
+
+    def test_get_sfaccount_by_name_fails(self):
+        sfv = san.SolidFireSanISCSIDriver()
+        self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request',
+                       self.fake_issue_api_request_fails)
+
account = sfv._get_sfaccount_by_name('some-name') + self.assertEqual(account, None) + + def test_delete_volume(self): + self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request', + self.fake_issue_api_request) + testvol = {'project_id': 'testprjid', + 'name': 'test_volume', + 'size': 1} + sfv = san.SolidFireSanISCSIDriver() + model_update = sfv.delete_volume(testvol) + + def test_delete_volume_fails_no_volume(self): + self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request', + self.fake_issue_api_request) + testvol = {'project_id': 'testprjid', + 'name': 'no-name', + 'size': 1} + sfv = san.SolidFireSanISCSIDriver() + try: + model_update = sfv.delete_volume(testvol) + self.fail("Should have thrown Error") + except Exception: + pass + + def test_delete_volume_fails_account_lookup(self): + self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request', + self.fake_issue_api_request) + testvol = {'project_id': 'testprjid', + 'name': 'no-name', + 'size': 1} + sfv = san.SolidFireSanISCSIDriver() + self.assertRaises(exception.DuplicateSfVolumeNames, + sfv.delete_volume, + testvol) + + def test_get_cluster_info(self): + self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request', + self.fake_issue_api_request) + sfv = san.SolidFireSanISCSIDriver() + sfv._get_cluster_info() + + def test_get_cluster_info_fail(self): + self.stubs.Set(san.SolidFireSanISCSIDriver, '_issue_api_request', + self.fake_issue_api_request_fails) + sfv = san.SolidFireSanISCSIDriver() + self.assertRaises(exception.SolidFireAPIException, + sfv._get_cluster_info) diff --git a/cinder/tests/test_api.py b/cinder/tests/test_api.py new file mode 100644 index 00000000000..8a158c70639 --- /dev/null +++ b/cinder/tests/test_api.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Unit tests for the API endpoint""" + +import httplib +import StringIO + +import webob + + +class FakeHttplibSocket(object): + """a fake socket implementation for httplib.HTTPResponse, trivial""" + def __init__(self, response_string): + self.response_string = response_string + self._buffer = StringIO.StringIO(response_string) + + def makefile(self, _mode, _other): + """Returns the socket's internal buffer""" + return self._buffer + + +class FakeHttplibConnection(object): + """A fake httplib.HTTPConnection for boto to use + + requests made via this connection actually get translated and routed into + our WSGI app, we then wait for the response and turn it back into + the httplib.HTTPResponse that boto expects. 
+ """ + def __init__(self, app, host, is_secure=False): + self.app = app + self.host = host + + def request(self, method, path, data, headers): + req = webob.Request.blank(path) + req.method = method + req.body = data + req.headers = headers + req.headers['Accept'] = 'text/html' + req.host = self.host + # Call the WSGI app, get the HTTP response + resp = str(req.get_response(self.app)) + # For some reason, the response doesn't have "HTTP/1.0 " prepended; I + # guess that's a function the web server usually provides. + resp = "HTTP/1.0 %s" % resp + self.sock = FakeHttplibSocket(resp) + self.http_response = httplib.HTTPResponse(self.sock) + # NOTE(vish): boto is accessing private variables for some reason + self._HTTPConnection__response = self.http_response + self.http_response.begin() + + def getresponse(self): + return self.http_response + + def getresponsebody(self): + return self.sock.response_string + + def close(self): + """Required for compatibility with boto/tornado""" + pass diff --git a/cinder/tests/test_compat_flagfile.py b/cinder/tests/test_compat_flagfile.py new file mode 100644 index 00000000000..a98b88fc1c3 --- /dev/null +++ b/cinder/tests/test_compat_flagfile.py @@ -0,0 +1,175 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import contextlib +import os +import shutil +import StringIO +import textwrap +import tempfile +import unittest +import uuid + +from cinder.compat import flagfile +from cinder import test + + +class ThatLastTwoPercentCoverageTestCase(unittest.TestCase): + def test_open_file_for_reading(self): + with flagfile._open_file_for_reading(__file__): + pass + + def test_open_fd_for_writing(self): + (fd, path) = tempfile.mkstemp() + try: + with flagfile._open_fd_for_writing(fd, None): + pass + finally: + os.remove(path) + + +class CompatFlagfileTestCase(test.TestCase): + def setUp(self): + super(CompatFlagfileTestCase, self).setUp() + self.files = {} + self.tempdir = str(uuid.uuid4()) + self.tempfiles = [] + + self.stubs.Set(flagfile, '_open_file_for_reading', self._fake_open) + self.stubs.Set(flagfile, '_open_fd_for_writing', self._fake_open) + self.stubs.Set(tempfile, 'mkdtemp', self._fake_mkdtemp) + self.stubs.Set(tempfile, 'mkstemp', self._fake_mkstemp) + self.stubs.Set(shutil, 'rmtree', self._fake_rmtree) + + def _fake_open(self, *args): + @contextlib.contextmanager + def managed_stringio(path): + if not path in self.files: + self.files[path] = "" + sio = StringIO.StringIO(textwrap.dedent(self.files[path])) + try: + yield sio + finally: + self.files[path] = sio.getvalue() + sio.close() + if len(args) == 2: + args = args[1:] # remove the fd arg for fdopen() case + return managed_stringio(args[0]) + + def _fake_mkstemp(self, *args, **kwargs): + self.assertTrue('dir' in kwargs) + self.assertEquals(kwargs['dir'], self.tempdir) + self.tempfiles.append(str(uuid.uuid4())) + return (None, self.tempfiles[-1]) + + def _fake_mkdtemp(self, *args, **kwargs): + return self.tempdir + + def _fake_rmtree(self, path): + self.assertEquals(self.tempdir, path) + self.tempdir = None + + def test_no_args(self): + before = [] + after = flagfile.handle_flagfiles(before, tempdir=self.tempdir) + self.assertEquals(after, before) + + def _do_test_empty_flagfile(self, before): + self.files['foo.flags'] = '' + after = flagfile.handle_flagfiles(before, tempdir=self.tempdir) + self.assertEquals(after, ['--config-file=' + self.tempfiles[-1]]) + self.assertEquals(self.files[self.tempfiles[-1]], '[DEFAULT]\n') + + def test_empty_flagfile(self): + self._do_test_empty_flagfile(['--flagfile=foo.flags']) + + def test_empty_flagfile_separated(self): + self._do_test_empty_flagfile(['--flagfile', 'foo.flags']) + + def test_empty_flagfile_single_hyphen(self): + self._do_test_empty_flagfile(['-flagfile=foo.flags']) + + def test_empty_flagfile_single_hyphen_separated_separated(self): + self._do_test_empty_flagfile(['-flagfile', 'foo.flags']) + + def test_empty_flagfile_with_other_args(self): + self.files['foo.flags'] = '' + + before = [ + '--foo', 'bar', + '--flagfile=foo.flags', + '--blaa=foo', + '--foo-flagfile', + '--flagfile-foo' + ] + + after = flagfile.handle_flagfiles(before, tempdir=self.tempdir) + + self.assertEquals(after, [ + '--foo', 'bar', + '--config-file=' + self.tempfiles[-1], + '--blaa=foo', + '--foo-flagfile', + '--flagfile-foo']) + self.assertEquals(self.files[self.tempfiles[-1]], '[DEFAULT]\n') + + def _do_test_flagfile(self, flags, conf): + self.files['foo.flags'] = flags + + before = ['--flagfile=foo.flags'] + + after = flagfile.handle_flagfiles(before, tempdir=self.tempdir) + + self.assertEquals(after, + ['--config-file=' + t + for t in reversed(self.tempfiles)]) + self.assertEquals(self.files[self.tempfiles[-1]], + '[DEFAULT]\n' + conf) + + def test_flagfile(self): + self._do_test_flagfile('--bar=foo', 
'bar=foo\n') + + def test_boolean_flag(self): + self._do_test_flagfile('--verbose', 'verbose=true\n') + + def test_boolean_inverted_flag(self): + self._do_test_flagfile('--noverbose', 'verbose=false\n') + + def test_flagfile_comments(self): + self._do_test_flagfile(' \n\n#foo\n--bar=foo\n--foo=bar\n//bar', + 'bar=foo\nfoo=bar\n') + + def test_flagfile_is_config(self): + self.files['foo.flags'] = '\n\n#foo\n//bar\n[DEFAULT]\nbar=foo' + before = ['--flagfile=foo.flags'] + after = flagfile.handle_flagfiles(before, tempdir=self.tempdir) + self.assertEquals(after, ['--config-file=foo.flags']) + + def test_flagfile_nested(self): + self.files['bar.flags'] = '--foo=bar' + + self._do_test_flagfile('--flagfile=bar.flags', '') + + self.assertEquals(self.files[self.tempfiles[-2]], + '[DEFAULT]\nfoo=bar\n') + + def test_flagfile_managed(self): + self.files['foo.flags'] = '' + before = ['--flagfile=foo.flags'] + with flagfile.handle_flagfiles_managed(before) as after: + self.assertEquals(after, ['--config-file=' + self.tempfiles[-1]]) + self.assertEquals(self.files[self.tempfiles[-1]], '[DEFAULT]\n') + self.assertTrue(self.tempdir is None) diff --git a/cinder/tests/test_context.py b/cinder/tests/test_context.py new file mode 100644 index 00000000000..afa78cda6a4 --- /dev/null +++ b/cinder/tests/test_context.py @@ -0,0 +1,70 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
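
The flagfile compatibility tests above pin down a simple rewriting scheme: legacy flag lines become key=value pairs under a [DEFAULT] section, booleans become key=true, and a "no" prefix inverts them. A rough sketch of just the behaviours asserted here (illustrative; the real logic, including nested --flagfile handling, lives in cinder.compat.flagfile):

    def flags_to_config(lines):
        out = ['[DEFAULT]']
        for line in lines:
            line = line.strip()
            if not line or line.startswith('#') or line.startswith('//'):
                continue  # blank lines and comments are dropped
            flag = line.lstrip('-')
            if '=' in flag:
                out.append(flag)                   # --bar=foo   -> bar=foo
            elif flag.startswith('no'):
                out.append('%s=false' % flag[2:])  # --noverbose -> verbose=false
            else:
                out.append('%s=true' % flag)       # --verbose   -> verbose=true
        return '\n'.join(out) + '\n'

    assert flags_to_config(['--bar=foo']) == '[DEFAULT]\nbar=foo\n'
    assert flags_to_config(['--verbose']) == '[DEFAULT]\nverbose=true\n'
    assert flags_to_config(['--noverbose']) == '[DEFAULT]\nverbose=false\n'
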
+ +from cinder import context +from cinder import test + + +class ContextTestCase(test.TestCase): + + def test_request_context_sets_is_admin(self): + ctxt = context.RequestContext('111', + '222', + roles=['admin', 'weasel']) + self.assertEquals(ctxt.is_admin, True) + + def test_request_context_sets_is_admin_upcase(self): + ctxt = context.RequestContext('111', + '222', + roles=['Admin', 'weasel']) + self.assertEquals(ctxt.is_admin, True) + + def test_request_context_read_deleted(self): + ctxt = context.RequestContext('111', + '222', + read_deleted='yes') + self.assertEquals(ctxt.read_deleted, 'yes') + + ctxt.read_deleted = 'no' + self.assertEquals(ctxt.read_deleted, 'no') + + def test_request_context_read_deleted_invalid(self): + self.assertRaises(ValueError, + context.RequestContext, + '111', + '222', + read_deleted=True) + + ctxt = context.RequestContext('111', '222') + self.assertRaises(ValueError, + setattr, + ctxt, + 'read_deleted', + True) + + def test_extra_args_to_context_get_logged(self): + info = {} + + def fake_warn(log_msg): + info['log_msg'] = log_msg + + self.stubs.Set(context.LOG, 'warn', fake_warn) + + c = context.RequestContext('user', 'project', + extra_arg1='meow', extra_arg2='wuff') + self.assertTrue(c) + self.assertIn("'extra_arg1': 'meow'", info['log_msg']) + self.assertIn("'extra_arg2': 'wuff'", info['log_msg']) diff --git a/cinder/tests/test_db_api.py b/cinder/tests/test_db_api.py new file mode 100644 index 00000000000..93e079d0e12 --- /dev/null +++ b/cinder/tests/test_db_api.py @@ -0,0 +1,331 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
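
The context tests that follow check that read_deleted only accepts the strings 'no', 'yes' and 'only', both at construction time and on later assignment. That behaviour is most naturally enforced with a property setter, sketched here on a toy class (Ctx is a hypothetical name, not cinder.context.RequestContext):

    class Ctx(object):
        def __init__(self, read_deleted='no'):
            self.read_deleted = read_deleted  # goes through the setter

        @property
        def read_deleted(self):
            return self._read_deleted

        @read_deleted.setter
        def read_deleted(self, value):
            if value not in ('no', 'yes', 'only'):
                raise ValueError("read_deleted must be 'no', 'yes' or 'only'")
            self._read_deleted = value

    ctx = Ctx(read_deleted='yes')
    ctx.read_deleted = 'no'        # fine
    try:
        ctx.read_deleted = True    # rejected, as the tests below expect
    except ValueError:
        pass
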
+ +"""Unit tests for the DB API""" + +from cinder import test +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags + +FLAGS = flags.FLAGS + + +def _get_fake_aggr_values(): + return {'name': 'fake_aggregate', + 'availability_zone': 'fake_avail_zone', } + + +def _get_fake_aggr_metadata(): + return {'fake_key1': 'fake_value1', + 'fake_key2': 'fake_value2'} + + +def _get_fake_aggr_hosts(): + return ['foo.openstack.org'] + + +def _create_aggregate(context=context.get_admin_context(), + values=_get_fake_aggr_values(), + metadata=_get_fake_aggr_metadata()): + return db.aggregate_create(context, values, metadata) + + +def _create_aggregate_with_hosts(context=context.get_admin_context(), + values=_get_fake_aggr_values(), + metadata=_get_fake_aggr_metadata(), + hosts=_get_fake_aggr_hosts()): + result = _create_aggregate(context=context, + values=values, metadata=metadata) + for host in hosts: + db.aggregate_host_add(context, result.id, host) + return result + + +class AggregateDBApiTestCase(test.TestCase): + def setUp(self): + super(AggregateDBApiTestCase, self).setUp() + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) + + def test_aggregate_create(self): + """Ensure aggregate can be created with no metadata.""" + result = _create_aggregate(metadata=None) + self.assertEqual(result['operational_state'], 'created') + + def test_aggregate_create_avoid_name_conflict(self): + """Test we can avoid conflict on deleted aggregates.""" + r1 = _create_aggregate(metadata=None) + db.aggregate_delete(context.get_admin_context(), r1.id) + values = {'name': r1.name, 'availability_zone': 'new_zone'} + r2 = _create_aggregate(values=values) + self.assertEqual(r2.name, values['name']) + self.assertEqual(r2.availability_zone, values['availability_zone']) + self.assertEqual(r2.operational_state, "created") + + def test_aggregate_create_raise_exist_exc(self): + """Ensure aggregate names are distinct.""" + _create_aggregate(metadata=None) + self.assertRaises(exception.AggregateNameExists, + _create_aggregate, metadata=None) + + def test_aggregate_get_raise_not_found(self): + """Ensure AggregateNotFound is raised when getting an aggregate.""" + ctxt = context.get_admin_context() + # this does not exist! + aggregate_id = 1 + self.assertRaises(exception.AggregateNotFound, + db.aggregate_get, + ctxt, aggregate_id) + + def test_aggregate_metadata_get_raise_not_found(self): + """Ensure AggregateNotFound is raised when getting metadata.""" + ctxt = context.get_admin_context() + # this does not exist! 
+ aggregate_id = 1 + self.assertRaises(exception.AggregateNotFound, + db.aggregate_metadata_get, + ctxt, aggregate_id) + + def test_aggregate_create_with_metadata(self): + """Ensure aggregate can be created with metadata.""" + ctxt = context.get_admin_context() + result = _create_aggregate(context=ctxt) + expected_metadata = db.aggregate_metadata_get(ctxt, result['id']) + self.assertDictMatch(expected_metadata, _get_fake_aggr_metadata()) + + def test_aggregate_create_low_privi_context(self): + """Ensure right context is applied when creating aggregate.""" + self.assertRaises(exception.AdminRequired, + db.aggregate_create, + self.context, _get_fake_aggr_values()) + + def test_aggregate_get(self): + """Ensure we can get aggregate with all its relations.""" + ctxt = context.get_admin_context() + result = _create_aggregate_with_hosts(context=ctxt) + expected = db.aggregate_get(ctxt, result.id) + self.assertEqual(_get_fake_aggr_hosts(), expected.hosts) + self.assertEqual(_get_fake_aggr_metadata(), expected.metadetails) + + def test_aggregate_get_by_host(self): + """Ensure we can get an aggregate by host.""" + ctxt = context.get_admin_context() + r1 = _create_aggregate_with_hosts(context=ctxt) + r2 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org') + self.assertEqual(r1.id, r2.id) + + def test_aggregate_get_by_host_not_found(self): + """Ensure AggregateHostNotFound is raised with unknown host.""" + ctxt = context.get_admin_context() + _create_aggregate_with_hosts(context=ctxt) + self.assertRaises(exception.AggregateHostNotFound, + db.aggregate_get_by_host, ctxt, 'unknown_host') + + def test_aggregate_delete_raise_not_found(self): + """Ensure AggregateNotFound is raised when deleting an aggregate.""" + ctxt = context.get_admin_context() + # this does not exist! 
+ aggregate_id = 1 + self.assertRaises(exception.AggregateNotFound, + db.aggregate_delete, + ctxt, aggregate_id) + + def test_aggregate_delete(self): + """Ensure we can delete an aggregate.""" + ctxt = context.get_admin_context() + result = _create_aggregate(context=ctxt, metadata=None) + db.aggregate_delete(ctxt, result['id']) + expected = db.aggregate_get_all(ctxt) + self.assertEqual(0, len(expected)) + aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'), + result['id']) + self.assertEqual(aggregate["operational_state"], "dismissed") + + def test_aggregate_update(self): + """Ensure an aggregate can be updated.""" + ctxt = context.get_admin_context() + result = _create_aggregate(context=ctxt, metadata=None) + new_values = _get_fake_aggr_values() + new_values['availability_zone'] = 'different_avail_zone' + updated = db.aggregate_update(ctxt, 1, new_values) + self.assertNotEqual(result.availability_zone, + updated.availability_zone) + + def test_aggregate_update_with_metadata(self): + """Ensure an aggregate can be updated with metadata.""" + ctxt = context.get_admin_context() + result = _create_aggregate(context=ctxt, metadata=None) + values = _get_fake_aggr_values() + values['metadata'] = _get_fake_aggr_metadata() + db.aggregate_update(ctxt, 1, values) + expected = db.aggregate_metadata_get(ctxt, result.id) + self.assertDictMatch(_get_fake_aggr_metadata(), expected) + + def test_aggregate_update_with_existing_metadata(self): + """Ensure an aggregate can be updated with existing metadata.""" + ctxt = context.get_admin_context() + result = _create_aggregate(context=ctxt) + values = _get_fake_aggr_values() + values['metadata'] = _get_fake_aggr_metadata() + values['metadata']['fake_key1'] = 'foo' + db.aggregate_update(ctxt, 1, values) + expected = db.aggregate_metadata_get(ctxt, result.id) + self.assertDictMatch(values['metadata'], expected) + + def test_aggregate_update_raise_not_found(self): + """Ensure AggregateNotFound is raised when updating an aggregate.""" + ctxt = context.get_admin_context() + # this does not exist! 
+ aggregate_id = 1 + new_values = _get_fake_aggr_values() + self.assertRaises(exception.AggregateNotFound, + db.aggregate_update, ctxt, aggregate_id, new_values) + + def test_aggregate_get_all(self): + """Ensure we can get all aggregates.""" + ctxt = context.get_admin_context() + counter = 3 + for c in xrange(counter): + _create_aggregate(context=ctxt, + values={'name': 'fake_aggregate_%d' % c, + 'availability_zone': 'fake_avail_zone'}, + metadata=None) + results = db.aggregate_get_all(ctxt) + self.assertEqual(len(results), counter) + + def test_aggregate_get_all_non_deleted(self): + """Ensure we get only non-deleted aggregates.""" + ctxt = context.get_admin_context() + add_counter = 5 + remove_counter = 2 + aggregates = [] + for c in xrange(1, add_counter): + values = {'name': 'fake_aggregate_%d' % c, + 'availability_zone': 'fake_avail_zone'} + aggregates.append(_create_aggregate(context=ctxt, + values=values, metadata=None)) + for c in xrange(1, remove_counter): + db.aggregate_delete(ctxt, aggregates[c - 1].id) + results = db.aggregate_get_all(ctxt) + self.assertEqual(len(results), add_counter - remove_counter) + + def test_aggregate_metadata_add(self): + """Ensure we can add metadata for the aggregate.""" + ctxt = context.get_admin_context() + result = _create_aggregate(context=ctxt, metadata=None) + metadata = _get_fake_aggr_metadata() + db.aggregate_metadata_add(ctxt, result.id, metadata) + expected = db.aggregate_metadata_get(ctxt, result.id) + self.assertDictMatch(metadata, expected) + + def test_aggregate_metadata_update(self): + """Ensure we can update metadata for the aggregate.""" + ctxt = context.get_admin_context() + result = _create_aggregate(context=ctxt) + metadata = _get_fake_aggr_metadata() + key = metadata.keys()[0] + db.aggregate_metadata_delete(ctxt, result.id, key) + new_metadata = {key: 'foo'} + db.aggregate_metadata_add(ctxt, result.id, new_metadata) + expected = db.aggregate_metadata_get(ctxt, result.id) + metadata[key] = 'foo' + self.assertDictMatch(metadata, expected) + + def test_aggregate_metadata_delete(self): + """Ensure we can delete metadata for the aggregate.""" + ctxt = context.get_admin_context() + result = _create_aggregate(context=ctxt, metadata=None) + metadata = _get_fake_aggr_metadata() + db.aggregate_metadata_add(ctxt, result.id, metadata) + db.aggregate_metadata_delete(ctxt, result.id, metadata.keys()[0]) + expected = db.aggregate_metadata_get(ctxt, result.id) + del metadata[metadata.keys()[0]] + self.assertDictMatch(metadata, expected) + + def test_aggregate_metadata_delete_raise_not_found(self): + """Ensure AggregateMetadataNotFound is raised when deleting.""" + ctxt = context.get_admin_context() + result = _create_aggregate(context=ctxt) + self.assertRaises(exception.AggregateMetadataNotFound, + db.aggregate_metadata_delete, + ctxt, result.id, 'foo_key') + + def test_aggregate_host_add(self): + """Ensure we can add host to the aggregate.""" + ctxt = context.get_admin_context() + result = _create_aggregate_with_hosts(context=ctxt, metadata=None) + expected = db.aggregate_host_get_all(ctxt, result.id) + self.assertEqual(_get_fake_aggr_hosts(), expected) + + def test_aggregate_host_add_deleted(self): + """Ensure we can add a host that was previously deleted.""" + ctxt = context.get_admin_context() + result = _create_aggregate_with_hosts(context=ctxt, metadata=None) + host = _get_fake_aggr_hosts()[0] + db.aggregate_host_delete(ctxt, result.id, host) + db.aggregate_host_add(ctxt, result.id, host) + expected = db.aggregate_host_get_all(ctxt, 
result.id)
+        self.assertEqual(len(expected), 1)
+
+    def test_aggregate_host_add_duplicate_raise_conflict(self):
+        """Ensure adding the same host to two aggregates raises a conflict."""
+        ctxt = context.get_admin_context()
+        _create_aggregate_with_hosts(context=ctxt, metadata=None)
+        self.assertRaises(exception.AggregateHostConflict,
+                          _create_aggregate_with_hosts, ctxt,
+                          values={'name': 'fake_aggregate2',
+                                  'availability_zone': 'fake_avail_zone2', },
+                          metadata=None)
+
+    def test_aggregate_host_add_duplicate_raise_exist_exc(self):
+        """Ensure we cannot add a host to the same aggregate twice."""
+        ctxt = context.get_admin_context()
+        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
+        self.assertRaises(exception.AggregateHostExists,
+                          db.aggregate_host_add,
+                          ctxt, result.id, _get_fake_aggr_hosts()[0])
+
+    def test_aggregate_host_add_raise_not_found(self):
+        """Ensure AggregateNotFound is raised when adding a host."""
+        ctxt = context.get_admin_context()
+        # this does not exist!
+        aggregate_id = 1
+        host = _get_fake_aggr_hosts()[0]
+        self.assertRaises(exception.AggregateNotFound,
+                          db.aggregate_host_add,
+                          ctxt, aggregate_id, host)
+
+    def test_aggregate_host_delete(self):
+        """Ensure we can delete a host from the aggregate."""
+        ctxt = context.get_admin_context()
+        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
+        db.aggregate_host_delete(ctxt, result.id,
+                                 _get_fake_aggr_hosts()[0])
+        expected = db.aggregate_host_get_all(ctxt, result.id)
+        self.assertEqual(0, len(expected))
+
+    def test_aggregate_host_delete_raise_not_found(self):
+        """Ensure AggregateHostNotFound is raised when deleting a host."""
+        ctxt = context.get_admin_context()
+        result = _create_aggregate(context=ctxt)
+        self.assertRaises(exception.AggregateHostNotFound,
+                          db.aggregate_host_delete,
+                          ctxt, result.id, _get_fake_aggr_hosts()[0])
diff --git a/cinder/tests/test_exception.py b/cinder/tests/test_exception.py
new file mode 100644
index 00000000000..717fb3d85f0
--- /dev/null
+++ b/cinder/tests/test_exception.py
@@ -0,0 +1,126 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
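
The aggregate host tests above encode soft-delete semantics: deleting a host marks the association rather than removing it, and re-adding the same host revives it instead of creating a duplicate. A toy in-memory model of that behaviour (purely illustrative; AggregateHosts is a made-up name, the real logic lives in cinder.db.sqlalchemy.api):

    class AggregateHosts(object):
        def __init__(self):
            self._hosts = {}  # host -> deleted flag

        def add(self, host):
            if self._hosts.get(host) is False:
                raise ValueError('host already in aggregate')  # HostExists
            self._hosts[host] = False  # new, or revived after delete

        def delete(self, host):
            if self._hosts.get(host) is not False:
                raise KeyError('host not in aggregate')  # HostNotFound
            self._hosts[host] = True

        def all(self):
            return [h for h, deleted in self._hosts.items() if not deleted]

    agg = AggregateHosts()
    agg.add('foo.openstack.org')
    agg.delete('foo.openstack.org')
    agg.add('foo.openstack.org')  # revive, as in test_aggregate_host_add_deleted
    assert agg.all() == ['foo.openstack.org']
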
+ +from cinder import test +from cinder import exception + + +class FakeNotifier(object): + """Acts like the cinder.notifier.api module.""" + ERROR = 88 + + def __init__(self): + self.provided_publisher = None + self.provided_event = None + self.provided_priority = None + self.provided_payload = None + + def notify(self, publisher, event, priority, payload): + self.provided_publisher = publisher + self.provided_event = event + self.provided_priority = priority + self.provided_payload = payload + + +def good_function(): + return 99 + + +def bad_function_error(): + raise exception.Error() + + +def bad_function_exception(): + raise test.TestingException() + + +class WrapExceptionTestCase(test.TestCase): + def test_wrap_exception_good_return(self): + wrapped = exception.wrap_exception() + self.assertEquals(99, wrapped(good_function)()) + + def test_wrap_exception_throws_error(self): + wrapped = exception.wrap_exception() + self.assertRaises(exception.Error, wrapped(bad_function_error)) + + def test_wrap_exception_throws_exception(self): + wrapped = exception.wrap_exception() + self.assertRaises(test.TestingException, + wrapped(bad_function_exception)) + + def test_wrap_exception_with_notifier(self): + notifier = FakeNotifier() + wrapped = exception.wrap_exception(notifier, "publisher", "event", + "level") + self.assertRaises(test.TestingException, + wrapped(bad_function_exception)) + self.assertEquals(notifier.provided_publisher, "publisher") + self.assertEquals(notifier.provided_event, "event") + self.assertEquals(notifier.provided_priority, "level") + for key in ['exception', 'args']: + self.assertTrue(key in notifier.provided_payload.keys()) + + def test_wrap_exception_with_notifier_defaults(self): + notifier = FakeNotifier() + wrapped = exception.wrap_exception(notifier) + self.assertRaises(test.TestingException, + wrapped(bad_function_exception)) + self.assertEquals(notifier.provided_publisher, None) + self.assertEquals(notifier.provided_event, "bad_function_exception") + self.assertEquals(notifier.provided_priority, notifier.ERROR) + + +class CinderExceptionTestCase(test.TestCase): + def test_default_error_msg(self): + class FakeCinderException(exception.CinderException): + message = "default message" + + exc = FakeCinderException() + self.assertEquals(unicode(exc), 'default message') + + def test_error_msg(self): + self.assertEquals(unicode(exception.CinderException('test')), + 'test') + + def test_default_error_msg_with_kwargs(self): + class FakeCinderException(exception.CinderException): + message = "default message: %(code)s" + + exc = FakeCinderException(code=500) + self.assertEquals(unicode(exc), 'default message: 500') + + def test_error_msg_exception_with_kwargs(self): + class FakeCinderException(exception.CinderException): + message = "default message: %(mispelled_code)s" + + exc = FakeCinderException(code=500) + self.assertEquals(unicode(exc), 'default message: %(mispelled_code)s') + + def test_default_error_code(self): + class FakeCinderException(exception.CinderException): + code = 404 + + exc = FakeCinderException() + self.assertEquals(exc.kwargs['code'], 404) + + def test_error_code_from_kwarg(self): + class FakeCinderException(exception.CinderException): + code = 500 + + exc = FakeCinderException(code=404) + self.assertEquals(exc.kwargs['code'], 404) diff --git a/cinder/tests/test_flags.py b/cinder/tests/test_flags.py new file mode 100644 index 00000000000..e94c3484ed2 --- /dev/null +++ b/cinder/tests/test_flags.py @@ -0,0 +1,146 @@ +# vim: tabstop=4 shiftwidth=4 
softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import tempfile + +from cinder import flags +from cinder.openstack.common import cfg +from cinder import test + +FLAGS = flags.FLAGS +FLAGS.register_opt(cfg.StrOpt('flags_unittest', + default='foo', + help='for testing purposes only')) + + +class FlagsTestCase(test.TestCase): + + def setUp(self): + super(FlagsTestCase, self).setUp() + self.FLAGS = flags.CinderConfigOpts() + self.global_FLAGS = flags.FLAGS + + def test_declare(self): + self.assert_('answer' not in self.global_FLAGS) + flags.DECLARE('answer', 'cinder.tests.declare_flags') + self.assert_('answer' in self.global_FLAGS) + self.assertEqual(self.global_FLAGS.answer, 42) + + # Make sure we don't overwrite anything + self.global_FLAGS.set_override('answer', 256) + self.assertEqual(self.global_FLAGS.answer, 256) + flags.DECLARE('answer', 'cinder.tests.declare_flags') + self.assertEqual(self.global_FLAGS.answer, 256) + + def test_getopt_non_interspersed_args(self): + self.assert_('runtime_answer' not in self.global_FLAGS) + + argv = ['flags_test', 'extra_arg', '--runtime_answer=60'] + args = self.global_FLAGS(argv) + self.assertEqual(len(args), 3) + self.assertEqual(argv, args) + + def test_runtime_and_unknown_flags(self): + self.assert_('runtime_answer' not in self.global_FLAGS) + import cinder.tests.runtime_flags + self.assert_('runtime_answer' in self.global_FLAGS) + self.assertEqual(self.global_FLAGS.runtime_answer, 54) + + def test_long_vs_short_flags(self): + self.global_FLAGS.reset() + self.global_FLAGS.register_cli_opt(cfg.StrOpt('duplicate_answer_long', + default='val', + help='desc')) + argv = ['flags_test', '--duplicate_answer=60', 'extra_arg'] + args = self.global_FLAGS(argv) + + self.assert_('duplicate_answer' not in self.global_FLAGS) + self.assert_(self.global_FLAGS.duplicate_answer_long, 60) + + self.global_FLAGS.reset() + self.global_FLAGS.register_cli_opt(cfg.IntOpt('duplicate_answer', + default=60, + help='desc')) + args = self.global_FLAGS(argv) + self.assertEqual(self.global_FLAGS.duplicate_answer, 60) + self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val') + + def test_flag_leak_left(self): + self.assertEqual(FLAGS.flags_unittest, 'foo') + self.flags(flags_unittest='bar') + self.assertEqual(FLAGS.flags_unittest, 'bar') + + def test_flag_leak_right(self): + self.assertEqual(FLAGS.flags_unittest, 'foo') + self.flags(flags_unittest='bar') + self.assertEqual(FLAGS.flags_unittest, 'bar') + + def test_flag_overrides(self): + self.assertEqual(FLAGS.flags_unittest, 'foo') + self.flags(flags_unittest='bar') + self.assertEqual(FLAGS.flags_unittest, 'bar') + self.reset_flags() + self.assertEqual(FLAGS.flags_unittest, 'foo') + + def test_flagfile(self): + opts = [ + cfg.StrOpt('string', default='default', help='desc'), + cfg.IntOpt('int', default=1, 
help='desc'), + cfg.BoolOpt('false', default=False, help='desc'), + cfg.BoolOpt('true', default=True, help='desc'), + cfg.MultiStrOpt('multi', default=['blaa'], help='desc'), + ] + + self.FLAGS.register_opts(opts) + + (fd, path) = tempfile.mkstemp(prefix='cinder', suffix='.flags') + + try: + os.write(fd, '--string=foo\n--int=2\n--false\n--notrue\n') + os.write(fd, '--multi=bar\n') + os.close(fd) + + self.FLAGS(['flags_test', '--flagfile=' + path]) + + self.assertEqual(self.FLAGS.string, 'foo') + self.assertEqual(self.FLAGS.int, 2) + self.assertEqual(self.FLAGS.false, True) + self.assertEqual(self.FLAGS.true, False) + self.assertEqual(self.FLAGS.multi, ['bar']) + + # Re-parse to test multistring isn't append multiple times + self.FLAGS(['flags_test', '--flagfile=' + path]) + self.assertEqual(self.FLAGS.multi, ['bar']) + finally: + os.remove(path) + + def test_defaults(self): + self.FLAGS.register_opt(cfg.StrOpt('foo', default='bar', help='desc')) + self.assertEqual(self.FLAGS.foo, 'bar') + + self.FLAGS.set_default('foo', 'blaa') + self.assertEqual(self.FLAGS.foo, 'blaa') + + def test_templated_values(self): + self.FLAGS.register_opt(cfg.StrOpt('foo', default='foo', help='desc')) + self.FLAGS.register_opt(cfg.StrOpt('bar', default='bar', help='desc')) + self.FLAGS.register_opt(cfg.StrOpt('blaa', + default='$foo$bar', help='desc')) + self.assertEqual(self.FLAGS.blaa, 'foobar') diff --git a/cinder/tests/test_iscsi.py b/cinder/tests/test_iscsi.py new file mode 100644 index 00000000000..d81b4ca5e22 --- /dev/null +++ b/cinder/tests/test_iscsi.py @@ -0,0 +1,116 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
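
test_templated_values above asserts that an option defaulting to '$foo$bar' resolves against the values of the foo and bar options. The cfg library's interpolation has its own rules, but string.Template reproduces the substitution semantics the test expects:

    import string

    # Illustration only: opts stands in for registered option values.
    opts = {'foo': 'foo', 'bar': 'bar', 'blaa': '$foo$bar'}

    def resolve(name):
        return string.Template(opts[name]).substitute(opts)

    assert resolve('blaa') == 'foobar'
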
+
+import string
+
+from cinder import test
+from cinder.volume import iscsi
+
+
+class TargetAdminTestCase(object):
+
+    def setUp(self):
+        self.cmds = []
+
+        self.tid = 1
+        self.target_name = 'iqn.2011-09.org.foo.bar:blaa'
+        self.lun = 10
+        self.path = '/foo/bar/blaa'
+
+        self.script_template = None
+
+    def get_script_params(self):
+        return {'tid': self.tid,
+                'target_name': self.target_name,
+                'lun': self.lun,
+                'path': self.path}
+
+    def get_script(self):
+        return self.script_template % self.get_script_params()
+
+    def fake_execute(self, *cmd, **kwargs):
+        self.cmds.append(string.join(cmd))
+        return "", None
+
+    def clear_cmds(self):
+        # Reset the recorded commands; assigning to a local 'cmds'
+        # here would silently do nothing.
+        self.cmds = []
+
+    def verify_cmds(self, cmds):
+        self.assertEqual(len(cmds), len(self.cmds))
+        for a, b in zip(cmds, self.cmds):
+            self.assertEqual(a, b)
+
+    def verify(self):
+        script = self.get_script()
+        cmds = []
+        for line in script.split('\n'):
+            if not line.strip():
+                continue
+            cmds.append(line)
+        self.verify_cmds(cmds)
+
+    def run_commands(self):
+        tgtadm = iscsi.get_target_admin()
+        tgtadm.set_execute(self.fake_execute)
+        tgtadm.new_target(self.target_name, self.tid)
+        tgtadm.show_target(self.tid)
+        tgtadm.new_logicalunit(self.tid, self.lun, self.path)
+        tgtadm.delete_logicalunit(self.tid, self.lun)
+        tgtadm.delete_target(self.tid)
+
+    def test_target_admin(self):
+        self.clear_cmds()
+        self.run_commands()
+        self.verify()
+
+
+class TgtAdmTestCase(test.TestCase, TargetAdminTestCase):
+
+    def setUp(self):
+        super(TgtAdmTestCase, self).setUp()
+        TargetAdminTestCase.setUp(self)
+        self.flags(iscsi_helper='tgtadm')
+        self.script_template = "\n".join([
+            "tgtadm --op new --lld=iscsi --mode=target --tid=%(tid)s "
+            "--targetname=%(target_name)s",
+            "tgtadm --op bind --lld=iscsi --mode=target --initiator-address=ALL "
+            "--tid=%(tid)s",
+            "tgtadm --op show --lld=iscsi --mode=target --tid=%(tid)s",
+            "tgtadm --op new --lld=iscsi --mode=logicalunit --tid=%(tid)s "
+            "--lun=%(lun)d --backing-store=%(path)s",
+            "tgtadm --op delete --lld=iscsi --mode=logicalunit --tid=%(tid)s "
+            "--lun=%(lun)d",
+            "tgtadm --op delete --lld=iscsi --mode=target --tid=%(tid)s"])
+
+    def get_script_params(self):
+        params = super(TgtAdmTestCase, self).get_script_params()
+        params['lun'] += 1
+        return params
+
+
+class IetAdmTestCase(test.TestCase, TargetAdminTestCase):
+
+    def setUp(self):
+        super(IetAdmTestCase, self).setUp()
+        TargetAdminTestCase.setUp(self)
+        self.flags(iscsi_helper='ietadm')
+        self.script_template = "\n".join([
+            "ietadm --op new --tid=%(tid)s --params Name=%(target_name)s",
+            "ietadm --op show --tid=%(tid)s",
+            "ietadm --op new --tid=%(tid)s --lun=%(lun)d "
+            "--params Path=%(path)s,Type=fileio",
+            "ietadm --op delete --tid=%(tid)s --lun=%(lun)d",
+            "ietadm --op delete --tid=%(tid)s"])
diff --git a/cinder/tests/test_log.py b/cinder/tests/test_log.py
new file mode 100644
index 00000000000..3d07df447d5
--- /dev/null
+++ b/cinder/tests/test_log.py
@@ -0,0 +1,218 @@
+import cStringIO
+import json
+import logging
+import sys
+
+from cinder import context
+from cinder import flags
+from cinder import log
+from cinder.notifier import api as notifier
+from cinder import test
+
+FLAGS = flags.FLAGS
+flags.DECLARE('list_notifier_drivers',
+              'cinder.notifier.list_notifier')
+
+
+def _fake_context():
+    return context.RequestContext(1, 1)
+
+
+class LoggerTestCase(test.TestCase):
+    def setUp(self):
+        super(LoggerTestCase, self).setUp()
+        self.log = log.getLogger()
+
+    def test_handlers_have_cinder_formatter(self):
+        formatters = []
+        for h in self.log.logger.handlers:
+            f =
h.formatter + if isinstance(f, log.LegacyCinderFormatter): + formatters.append(f) + self.assert_(formatters) + self.assertEqual(len(formatters), len(self.log.logger.handlers)) + + def test_handles_context_kwarg(self): + self.log.info("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + + def test_audit_handles_context_arg(self): + self.log.audit("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + + def test_will_be_verbose_if_verbose_flag_set(self): + self.flags(verbose=True) + log.setup() + self.assertEqual(logging.DEBUG, self.log.logger.getEffectiveLevel()) + + def test_will_not_be_verbose_if_verbose_flag_not_set(self): + self.flags(verbose=False) + log.setup() + self.assertEqual(logging.INFO, self.log.logger.getEffectiveLevel()) + + def test_no_logging_via_module(self): + for func in ('critical', 'error', 'exception', 'warning', 'warn', + 'info', 'debug', 'log', 'audit'): + self.assertRaises(AttributeError, getattr, log, func) + + +class LogHandlerTestCase(test.TestCase): + def test_log_path_logdir(self): + self.flags(logdir='/some/path', logfile=None) + self.assertEquals(log._get_log_file_path(binary='foo-bar'), + '/some/path/foo-bar.log') + + def test_log_path_logfile(self): + self.flags(logfile='/some/path/foo-bar.log') + self.assertEquals(log._get_log_file_path(binary='foo-bar'), + '/some/path/foo-bar.log') + + def test_log_path_none(self): + self.flags(logdir=None, logfile=None) + self.assertTrue(log._get_log_file_path(binary='foo-bar') is None) + + def test_log_path_logfile_overrides_logdir(self): + self.flags(logdir='/some/other/path', + logfile='/some/path/foo-bar.log') + self.assertEquals(log._get_log_file_path(binary='foo-bar'), + '/some/path/foo-bar.log') + + +class PublishErrorsHandlerTestCase(test.TestCase): + """Tests for cinder.log.PublishErrorsHandler""" + def setUp(self): + super(PublishErrorsHandlerTestCase, self).setUp() + self.publiserrorshandler = log.PublishErrorsHandler(logging.ERROR) + + def test_emit_cfg_list_notifier_drivers_in_flags(self): + self.stub_flg = False + + def fake_notifier(*args, **kwargs): + self.stub_flg = True + + self.stubs.Set(notifier, 'notify', fake_notifier) + logrecord = logging.LogRecord('name', 'WARN', '/tmp', 1, + 'Message', None, None) + self.publiserrorshandler.emit(logrecord) + self.assertTrue(self.stub_flg) + + def test_emit_cfg_log_notifier_in_list_notifier_drivers(self): + self.flags(list_notifier_drivers=['cinder.notifier.rabbit_notifier', + 'cinder.notifier.log_notifier']) + self.stub_flg = True + + def fake_notifier(*args, **kwargs): + self.stub_flg = False + + self.stubs.Set(notifier, 'notify', fake_notifier) + logrecord = logging.LogRecord('name', 'WARN', '/tmp', 1, + 'Message', None, None) + self.publiserrorshandler.emit(logrecord) + self.assertTrue(self.stub_flg) + + +class CinderFormatterTestCase(test.TestCase): + def setUp(self): + super(CinderFormatterTestCase, self).setUp() + self.flags(logging_context_format_string="HAS CONTEXT " + "[%(request_id)s]: " + "%(message)s", + logging_default_format_string="NOCTXT: %(message)s", + logging_debug_format_suffix="--DBG") + self.log = log.getLogger() + self.stream = cStringIO.StringIO() + self.handler = logging.StreamHandler(self.stream) + self.handler.setFormatter(log.LegacyCinderFormatter()) + self.log.logger.addHandler(self.handler) + self.level = self.log.logger.getEffectiveLevel() + self.log.logger.setLevel(logging.DEBUG) + + def tearDown(self): + self.log.logger.setLevel(self.level) + 
self.log.logger.removeHandler(self.handler) + super(CinderFormatterTestCase, self).tearDown() + + def test_uncontextualized_log(self): + self.log.info("foo") + self.assertEqual("NOCTXT: foo\n", self.stream.getvalue()) + + def test_contextualized_log(self): + ctxt = _fake_context() + self.log.info("bar", context=ctxt) + expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id + self.assertEqual(expected, self.stream.getvalue()) + + def test_debugging_log(self): + self.log.debug("baz") + self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue()) + + +class CinderLoggerTestCase(test.TestCase): + def setUp(self): + super(CinderLoggerTestCase, self).setUp() + levels = FLAGS.default_log_levels + levels.append("cinder-test=AUDIT") + self.flags(default_log_levels=levels, + verbose=True) + log.setup() + self.log = log.getLogger('cinder-test') + + def test_has_level_from_flags(self): + self.assertEqual(logging.AUDIT, self.log.logger.getEffectiveLevel()) + + def test_child_log_has_level_of_parent_flag(self): + l = log.getLogger('cinder-test.foo') + self.assertEqual(logging.AUDIT, l.logger.getEffectiveLevel()) + + +class JSONFormatterTestCase(test.TestCase): + def setUp(self): + super(JSONFormatterTestCase, self).setUp() + self.log = log.getLogger('test-json') + self.stream = cStringIO.StringIO() + handler = logging.StreamHandler(self.stream) + handler.setFormatter(log.JSONFormatter()) + self.log.logger.addHandler(handler) + self.log.logger.setLevel(logging.DEBUG) + + def test_json(self): + test_msg = 'This is a %(test)s line' + test_data = {'test': 'log'} + self.log.debug(test_msg, test_data) + + data = json.loads(self.stream.getvalue()) + self.assertTrue(data) + self.assertTrue('extra' in data) + self.assertEqual('test-json', data['name']) + + self.assertEqual(test_msg % test_data, data['message']) + self.assertEqual(test_msg, data['msg']) + self.assertEqual(test_data, data['args']) + + self.assertEqual('test_log.py', data['filename']) + self.assertEqual('test_json', data['funcname']) + + self.assertEqual('DEBUG', data['levelname']) + self.assertEqual(logging.DEBUG, data['levelno']) + self.assertFalse(data['traceback']) + + def test_json_exception(self): + test_msg = 'This is %s' + test_data = 'exceptional' + try: + raise Exception('This is exceptional') + except Exception: + self.log.exception(test_msg, test_data) + + data = json.loads(self.stream.getvalue()) + self.assertTrue(data) + self.assertTrue('extra' in data) + self.assertEqual('test-json', data['name']) + + self.assertEqual(test_msg % test_data, data['message']) + self.assertEqual(test_msg, data['msg']) + self.assertEqual([test_data], data['args']) + + self.assertEqual('ERROR', data['levelname']) + self.assertEqual(logging.ERROR, data['levelno']) + self.assertTrue(data['traceback']) diff --git a/cinder/tests/test_migrations.conf b/cinder/tests/test_migrations.conf new file mode 100644 index 00000000000..774f1499406 --- /dev/null +++ b/cinder/tests/test_migrations.conf @@ -0,0 +1,9 @@ +[DEFAULT] +# Set up any number of migration data stores you want, one +# The "name" used in the test is the config variable key. 
+#sqlite=sqlite:///test_migrations.db
+sqlite=sqlite://
+#mysql=mysql://root:@localhost/test_migrations
+#postgresql=postgresql://user:pass@localhost/test_migrations
+[walk_style]
+snake_walk=yes
diff --git a/cinder/tests/test_migrations.py b/cinder/tests/test_migrations.py
new file mode 100644
index 00000000000..e16e9d04f91
--- /dev/null
+++ b/cinder/tests/test_migrations.py
@@ -0,0 +1,296 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for database migrations. This test case reads the configuration
+file test_migrations.conf for database connection settings
+to use in the tests. For each connection found in the config file,
+the test case runs a series of test cases to ensure that migrations work
+properly both upgrading and downgrading, and that no data loss occurs
+if possible.
+"""
+
+import ConfigParser
+import commands
+import os
+import unittest
+import urlparse
+
+from migrate.versioning import repository
+import sqlalchemy
+
+import cinder.db.sqlalchemy.migrate_repo
+from cinder.db.sqlalchemy.migration import versioning_api as migration_api
+from cinder import log as logging
+from cinder import test
+
+LOG = logging.getLogger('cinder.tests.test_migrations')
+
+
+def _mysql_get_connect_string(user="openstack_citest",
+                              passwd="openstack_citest",
+                              database="openstack_citest"):
+    """
+    Try to get a connection with a very specific set of values; if we get
+    these then we'll run the mysql tests, otherwise they are skipped.
+    """
+    return "mysql://%(user)s:%(passwd)s@localhost/%(database)s" % locals()
+
+
+def _is_mysql_avail(user="openstack_citest",
+                    passwd="openstack_citest",
+                    database="openstack_citest"):
+    try:
+        connect_uri = _mysql_get_connect_string(
+            user=user, passwd=passwd, database=database)
+        engine = sqlalchemy.create_engine(connect_uri)
+        connection = engine.connect()
+    except Exception:
+        # intentional catch-all, to handle exceptions even if we don't
+        # have mysql code loaded at all.
+        return False
+    else:
+        connection.close()
+        return True
+
+
+def _missing_mysql():
+    if "NOVA_TEST_MYSQL_PRESENT" in os.environ:
+        return True
+    return not _is_mysql_avail()
+
+
+class TestMigrations(test.TestCase):
+    """Test sqlalchemy-migrate migrations"""
+
+    TEST_DATABASES = {}
+    DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+                                       'test_migrations.conf')
+    # Test machines can set the CINDER_TEST_MIGRATIONS_CONF variable
+    # to override the location of the config file for migration testing
+    CONFIG_FILE_PATH = os.environ.get('CINDER_TEST_MIGRATIONS_CONF',
+                                      DEFAULT_CONFIG_FILE)
+    MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__
+    REPOSITORY = repository.Repository(
+        os.path.abspath(os.path.dirname(MIGRATE_FILE)))
+
+    def setUp(self):
+        super(TestMigrations, self).setUp()
+
+        self.snake_walk = False
+
+        # Load test databases from the config file. Only do this
+        # once. No need to re-run this on each test...
+ LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH) + if not TestMigrations.TEST_DATABASES: + if os.path.exists(TestMigrations.CONFIG_FILE_PATH): + cp = ConfigParser.RawConfigParser() + try: + cp.read(TestMigrations.CONFIG_FILE_PATH) + defaults = cp.defaults() + for key, value in defaults.items(): + TestMigrations.TEST_DATABASES[key] = value + self.snake_walk = cp.getboolean('walk_style', 'snake_walk') + except ConfigParser.ParsingError, e: + self.fail("Failed to read test_migrations.conf config " + "file. Got error: %s" % e) + else: + self.fail("Failed to find test_migrations.conf config " + "file.") + + self.engines = {} + for key, value in TestMigrations.TEST_DATABASES.items(): + self.engines[key] = sqlalchemy.create_engine(value) + + # We start each test case with a completely blank slate. + self._reset_databases() + + def tearDown(self): + + # We destroy the test data store between each test case, + # and recreate it, which ensures that we have no side-effects + # from the tests + self._reset_databases() + + # remove these from the list so they aren't used in the migration tests + if "mysqlcitest" in self.engines: + del self.engines["mysqlcitest"] + if "mysqlcitest" in TestMigrations.TEST_DATABASES: + del TestMigrations.TEST_DATABASES["mysqlcitest"] + super(TestMigrations, self).tearDown() + + def _reset_databases(self): + def execute_cmd(cmd=None): + status, output = commands.getstatusoutput(cmd) + LOG.debug(output) + self.assertEqual(0, status) + for key, engine in self.engines.items(): + conn_string = TestMigrations.TEST_DATABASES[key] + conn_pieces = urlparse.urlparse(conn_string) + if conn_string.startswith('sqlite'): + # We can just delete the SQLite database, which is + # the easiest and cleanest solution + db_path = conn_pieces.path.strip('/') + if os.path.exists(db_path): + os.unlink(db_path) + # No need to recreate the SQLite DB. SQLite will + # create it for us if it's not there... + elif conn_string.startswith('mysql'): + # We can execute the MySQL client to destroy and re-create + # the MYSQL database, which is easier and less error-prone + # than using SQLAlchemy to do this via MetaData...trust me. 
+                database = conn_pieces.path.strip('/')
+                loc_pieces = conn_pieces.netloc.split('@')
+                host = loc_pieces[1]
+                auth_pieces = loc_pieces[0].split(':')
+                user = auth_pieces[0]
+                password = ""
+                if len(auth_pieces) > 1:
+                    if auth_pieces[1].strip():
+                        password = "-p\"%s\"" % auth_pieces[1]
+                sql = ("drop database if exists %(database)s; "
+                       "create database %(database)s;") % locals()
+                cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
+                       "-e \"%(sql)s\"") % locals()
+                execute_cmd(cmd)
+            elif conn_string.startswith('postgresql'):
+                database = conn_pieces.path.strip('/')
+                loc_pieces = conn_pieces.netloc.split('@')
+                host = loc_pieces[1]
+                auth_pieces = loc_pieces[0].split(':')
+                user = auth_pieces[0]
+                password = ""
+                if len(auth_pieces) > 1:
+                    if auth_pieces[1].strip():
+                        password = auth_pieces[1]
+                cmd = ("touch ~/.pgpass;"
+                       "chmod 0600 ~/.pgpass;"
+                       "sed -i -e"
+                       "'1{s/^.*$/\*:\*:\*:%(user)s:%(password)s/};"
+                       "1!d' ~/.pgpass") % locals()
+                execute_cmd(cmd)
+                sql = ("UPDATE pg_catalog.pg_database SET datallowconn=false "
+                       "WHERE datname='%(database)s';") % locals()
+                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
+                execute_cmd(cmd)
+                sql = ("SELECT pg_catalog.pg_terminate_backend(procpid) "
+                       "FROM pg_catalog.pg_stat_activity "
+                       "WHERE datname='%(database)s';") % locals()
+                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
+                execute_cmd(cmd)
+                sql = ("drop database if exists %(database)s;") % locals()
+                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
+                execute_cmd(cmd)
+                sql = ("create database %(database)s;") % locals()
+                cmd = ("psql -U%(user)s -h%(host)s -c\"%(sql)s\"") % locals()
+                execute_cmd(cmd)
+
+    def test_walk_versions(self):
+        """
+        Walks all version scripts for each tested database, ensuring
+        that there are no errors in the version scripts for each engine
+        """
+        for key, engine in self.engines.items():
+            self._walk_versions(engine, self.snake_walk)
+
+    def test_mysql_connect_fail(self):
+        """
+        Test that we can trigger a mysql connection failure and we fail
+        gracefully to ensure we don't break people without mysql
+        """
+        if _is_mysql_avail(user="openstack_cifail"):
+            self.fail("Shouldn't have connected")
+
+    @test.skip_if(_missing_mysql(), "mysql not available")
+    def test_mysql_innodb(self):
+        """
+        Test that table creation on mysql only builds InnoDB tables
+        """
+        # add this to the global lists to make reset work with it; it's
+        # removed automatically in tearDown, so no need to clean it up here.
+        connect_string = _mysql_get_connect_string()
+        engine = sqlalchemy.create_engine(connect_string)
+        self.engines["mysqlcitest"] = engine
+        TestMigrations.TEST_DATABASES["mysqlcitest"] = connect_string
+
+        # build a fully populated mysql database with all the tables
+        self._reset_databases()
+        self._walk_versions(engine, False, False)
+
+        uri = _mysql_get_connect_string(database="information_schema")
+        connection = sqlalchemy.create_engine(uri).connect()
+
+        # sanity check
+        total = connection.execute("SELECT count(*) "
+                                   "from information_schema.TABLES "
+                                   "where TABLE_SCHEMA='openstack_citest'")
+        self.assertTrue(total.scalar() > 0, "No tables found. 
Wrong schema?") + + noninnodb = connection.execute("SELECT count(*) " + "from information_schema.TABLES " + "where TABLE_SCHEMA='openstack_citest' " + "and ENGINE!='InnoDB'") + count = noninnodb.scalar() + self.assertEqual(count, 0, "%d non InnoDB tables created" % count) + + def _walk_versions(self, engine=None, snake_walk=False, downgrade=True): + # Determine latest version script from the repo, then + # upgrade from 1 through to the latest, with no data + # in the databases. This just checks that the schema itself + # upgrades successfully. + + # Place the database under version control + migration_api.version_control(engine, TestMigrations.REPOSITORY) + self.assertEqual(0, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + + LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest) + + for version in xrange(1, TestMigrations.REPOSITORY.latest + 1): + # upgrade -> downgrade -> upgrade + self._migrate_up(engine, version) + if snake_walk: + self._migrate_down(engine, version - 1) + self._migrate_up(engine, version) + + if downgrade: + # Now walk it back down to 0 from the latest, testing + # the downgrade paths. + for version in reversed( + xrange(0, TestMigrations.REPOSITORY.latest)): + # downgrade -> upgrade -> downgrade + self._migrate_down(engine, version) + if snake_walk: + self._migrate_up(engine, version + 1) + self._migrate_down(engine, version) + + def _migrate_down(self, engine, version): + migration_api.downgrade(engine, + TestMigrations.REPOSITORY, + version) + self.assertEqual(version, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) + + def _migrate_up(self, engine, version): + migration_api.upgrade(engine, + TestMigrations.REPOSITORY, + version) + self.assertEqual(version, + migration_api.db_version(engine, + TestMigrations.REPOSITORY)) diff --git a/cinder/tests/test_misc.py b/cinder/tests/test_misc.py new file mode 100644 index 00000000000..d89b2a69896 --- /dev/null +++ b/cinder/tests/test_misc.py @@ -0,0 +1,184 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
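The LockTestCase later in this file drives utils.synchronized, cinder's named-lock decorator. As a minimal sketch of the idea only, assuming a simple in-process lock table (the real helper also supports external file-based locks and eventlet green threads):

import functools
import threading

_semaphores = {}


def synchronized(name):
    """Serialize all callers that share `name` on one lock."""
    def wrap(f):
        # functools.wraps preserves __name__ and __doc__, which is
        # what test_synchronized_wrapped_function_metadata checks.
        @functools.wraps(f)
        def inner(*args, **kwargs):
            sem = _semaphores.setdefault(name, threading.Semaphore())
            with sem:
                return f(*args, **kwargs)
        return inner
    return wrap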
+ +import commands +import errno +import glob +import os +import select + +from eventlet import greenpool +from eventlet import greenthread +import lockfile + +from cinder import exception +from cinder import test +from cinder import utils + + +class ExceptionTestCase(test.TestCase): + @staticmethod + def _raise_exc(exc): + raise exc() + + def test_exceptions_raise(self): + for name in dir(exception): + exc = getattr(exception, name) + if isinstance(exc, type): + self.assertRaises(exc, self._raise_exc, exc) + + +class ProjectTestCase(test.TestCase): + def test_authors_up_to_date(self): + topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') + missing = set() + contributors = set() + mailmap = utils.parse_mailmap(os.path.join(topdir, '.mailmap')) + authors_file = open(os.path.join(topdir, + 'Authors'), 'r').read().lower() + + if os.path.exists(os.path.join(topdir, '.git')): + for email in commands.getoutput('git log --format=%ae').split(): + if not email: + continue + if "jenkins" in email and "openstack.org" in email: + continue + email = '<' + email.lower() + '>' + contributors.add(utils.str_dict_replace(email, mailmap)) + else: + return + + for contributor in contributors: + if contributor == 'cinder-core': + continue + if not contributor in authors_file: + missing.add(contributor) + + self.assertTrue(len(missing) == 0, + '%r not listed in Authors' % missing) + + def test_all_migrations_have_downgrade(self): + topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') + py_glob = os.path.join(topdir, "cinder", "db", "sqlalchemy", + "migrate_repo", "versions", "*.py") + missing_downgrade = [] + for path in glob.iglob(py_glob): + has_upgrade = False + has_downgrade = False + with open(path, "r") as f: + for line in f: + if 'def upgrade(' in line: + has_upgrade = True + if 'def downgrade(' in line: + has_downgrade = True + + if has_upgrade and not has_downgrade: + fname = os.path.basename(path) + missing_downgrade.append(fname) + + helpful_msg = (_("The following migrations are missing a downgrade:" + "\n\t%s") % '\n\t'.join(sorted(missing_downgrade))) + self.assert_(not missing_downgrade, helpful_msg) + + +class LockTestCase(test.TestCase): + def test_synchronized_wrapped_function_metadata(self): + @utils.synchronized('whatever') + def foo(): + """Bar""" + pass + self.assertEquals(foo.__doc__, 'Bar', "Wrapped function's docstring " + "got lost") + self.assertEquals(foo.__name__, 'foo', "Wrapped function's name " + "got mangled") + + def test_synchronized_internally(self): + """We can lock across multiple green threads""" + saved_sem_num = len(utils._semaphores) + seen_threads = list() + + @utils.synchronized('testlock2', external=False) + def f(id): + for x in range(10): + seen_threads.append(id) + greenthread.sleep(0) + + threads = [] + pool = greenpool.GreenPool(10) + for i in range(10): + threads.append(pool.spawn(f, i)) + + for thread in threads: + thread.wait() + + self.assertEquals(len(seen_threads), 100) + # Looking at the seen threads, split it into chunks of 10, and verify + # that the last 9 match the first in each chunk. 
+ for i in range(10): + for j in range(9): + self.assertEquals(seen_threads[i * 10], + seen_threads[i * 10 + 1 + j]) + + self.assertEqual(saved_sem_num, len(utils._semaphores), + "Semaphore leak detected") + + def test_nested_external_fails(self): + """We can not nest external syncs""" + + @utils.synchronized('testlock1', external=True) + def outer_lock(): + + @utils.synchronized('testlock2', external=True) + def inner_lock(): + pass + inner_lock() + try: + self.assertRaises(lockfile.NotMyLock, outer_lock) + finally: + utils.cleanup_file_locks() + + def test_synchronized_externally(self): + """We can lock across multiple processes""" + rpipe1, wpipe1 = os.pipe() + rpipe2, wpipe2 = os.pipe() + + @utils.synchronized('testlock1', external=True) + def f(rpipe, wpipe): + try: + os.write(wpipe, "foo") + except OSError, e: + self.assertEquals(e.errno, errno.EPIPE) + return + + rfds, _wfds, _efds = select.select([rpipe], [], [], 1) + self.assertEquals(len(rfds), 0, "The other process, which was" + " supposed to be locked, " + "wrote on its end of the " + "pipe") + os.close(rpipe) + + pid = os.fork() + if pid > 0: + os.close(wpipe1) + os.close(rpipe2) + + f(rpipe1, wpipe2) + else: + os.close(rpipe1) + os.close(wpipe2) + + f(rpipe2, wpipe1) + os._exit(0) diff --git a/cinder/tests/test_netapp.py b/cinder/tests/test_netapp.py new file mode 100644 index 00000000000..179e46b8b33 --- /dev/null +++ b/cinder/tests/test_netapp.py @@ -0,0 +1,927 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 NetApp, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
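The fake server plumbing in this file hinges on one httplib detail: HTTPResponse never touches a real socket, only the file object returned by the socket's makefile(). A self-contained miniature of that trick (CannedSocket is a name invented for this sketch, not part of the driver tests):

import httplib
import StringIO


class CannedSocket(object):
    """Just enough of a socket for httplib.HTTPResponse: makefile()
    replays a prerecorded byte string."""
    def __init__(self, raw):
        self._raw = raw

    def makefile(self, mode, bufsize=None):
        return StringIO.StringIO(self._raw)


raw = "HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi"
resp = httplib.HTTPResponse(CannedSocket(raw))
resp.begin()
assert resp.status == 200
assert resp.read() == "hi"

FakeHttplibSocket below applies the same idea, adding a writable buffer so the fake handler's output can be captured and replayed as a response.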
+"""
+Tests for NetApp volume driver
+
+"""
+
+import BaseHTTPServer
+import httplib
+import StringIO
+
+from lxml import etree
+
+from cinder import log as logging
+from cinder import test
+from cinder.volume import netapp
+
+LOG = logging.getLogger("cinder.volume.driver")
+
+
+WSDL_HEADER = """
+"""
+
+WSDL_TYPES = """
+"""
+
+WSDL_TRAILER = """
+
+"""
+
+RESPONSE_PREFIX = """
+"""
+
+RESPONSE_SUFFIX = """"""
+
+APIS = ['ApiProxy', 'DatasetListInfoIterStart', 'DatasetListInfoIterNext',
+        'DatasetListInfoIterEnd', 'DatasetEditBegin', 'DatasetEditCommit',
+        'DatasetProvisionMember', 'DatasetRemoveMember', 'DfmAbout',
+        'DpJobProgressEventListIterStart', 'DpJobProgressEventListIterNext',
+        'DpJobProgressEventListIterEnd', 'DatasetMemberListInfoIterStart',
+        'DatasetMemberListInfoIterNext', 'DatasetMemberListInfoIterEnd',
+        'HostListInfoIterStart', 'HostListInfoIterNext', 'HostListInfoIterEnd',
+        'LunListInfoIterStart', 'LunListInfoIterNext', 'LunListInfoIterEnd',
+        'StorageServiceDatasetProvision']
+
+iter_count = 0
+iter_table = {}
+
+
+class FakeDfmServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+    """HTTP handler that fakes enough stuff to allow the driver to run"""
+
+    def do_GET(s):
+        """Respond to a GET request."""
+        if '/dfm.wsdl' != s.path:
+            s.send_response(404)
+            s.end_headers()
+            return
+        s.send_response(200)
+        s.send_header("Content-Type", "application/wsdl+xml")
+        s.end_headers()
+        out = s.wfile
+        out.write(WSDL_HEADER)
+        out.write(WSDL_TYPES)
+        for api in APIS:
+            out.write('' % api)
+            out.write('' % api)
+            out.write('')
+            out.write('' % api)
+            out.write('' % api)
+            out.write('')
+        out.write('')
+        for api in APIS:
+            out.write('' % api)
+            out.write('' % api)
+            out.write('' % api)
+            out.write('')
+            out.write('')
+            out.write('')
+        out.write('')
+        for api in APIS:
+            out.write('' % api)
+            out.write('' % api)
+            out.write('')
+        out.write('')
+        out.write('')
+        out.write('')
+        out.write(WSDL_TRAILER)
+
+    def do_POST(s):
+        """Respond to a POST request."""
+        if '/apis/soap/v1' != s.path:
+            s.send_response(404)
+            s.end_headers()
+            return
+        request_xml = s.rfile.read(int(s.headers['Content-Length']))
+        ntap_ns = 'http://www.netapp.com/management/v1'
+        nsmap = {'env': 'http://schemas.xmlsoap.org/soap/envelope/',
+                 'na': ntap_ns}
+        root = etree.fromstring(request_xml)
+
+        body = root.xpath('/env:Envelope/env:Body', namespaces=nsmap)[0]
+        request = body.getchildren()[0]
+        tag = request.tag
+        if not tag.startswith('{' + ntap_ns + '}'):
+            s.send_response(500)
+            s.end_headers()
+            return
+        api = tag[(2 + len(ntap_ns)):]
+        global iter_count
+        global 
iter_table + if 'DatasetListInfoIterStart' == api: + body = """ + 1 + dataset + """ + elif 'DatasetListInfoIterNext' == api: + body = """ + + + 0 + + + 1 + """ + elif 'DatasetListInfoIterEnd' == api: + body = """""" + elif 'DatasetEditBegin' == api: + body = """ + 0 + """ + elif 'DatasetEditCommit' == api: + body = """ + false + + + 0 + + + """ + elif 'DatasetProvisionMember' == api: + body = """""" + elif 'DatasetRemoveMember' == api: + body = """""" + elif 'DfmAbout' == api: + body = """""" + elif 'DpJobProgressEventListIterStart' == api: + iter_name = 'dpjobprogress_%s' % iter_count + iter_count = iter_count + 1 + iter_table[iter_name] = 0 + body = """ + 2 + %s + """ % iter_name + elif 'DpJobProgressEventListIterNext' == api: + tags = body.xpath('na:DpJobProgressEventListIterNext/na:Tag', + namespaces=nsmap) + iter_name = tags[0].text + if iter_table[iter_name]: + body = """""" + else: + iter_table[iter_name] = 1 + body = """ + + + normal + lun-create + + 0 + + + + normal + job-end + + + 2 + """ + elif 'DpJobProgressEventListIterEnd' == api: + body = """""" + elif 'DatasetMemberListInfoIterStart' == api: + body = """ + 1 + dataset-member + """ + elif 'DatasetMemberListInfoIterNext' == api: + name = 'filer:/OpenStack_testproj/volume-00000001/volume-00000001' + body = """ + + + 0 + %s + + + 1 + """ % name + elif 'DatasetMemberListInfoIterEnd' == api: + body = """""" + elif 'HostListInfoIterStart' == api: + body = """ + 1 + host + """ + elif 'HostListInfoIterNext' == api: + body = """ + + + 1.2.3.4 + 0 + filer + + + 1 + """ + elif 'HostListInfoIterEnd' == api: + body = """""" + elif 'LunListInfoIterStart' == api: + body = """ + 1 + lun + """ + elif 'LunListInfoIterNext' == api: + path = 'OpenStack_testproj/volume-00000001/volume-00000001' + body = """ + + + 0 + %s + + + 1 + """ % path + elif 'LunListInfoIterEnd' == api: + body = """""" + elif 'ApiProxy' == api: + names = body.xpath('na:ApiProxy/na:Request/na:Name', + namespaces=nsmap) + proxy = names[0].text + if 'igroup-list-info' == proxy: + igroup = 'openstack-iqn.1993-08.org.debian:01:23456789' + initiator = 'iqn.1993-08.org.debian:01:23456789' + proxy_body = """ + + %s + iscsi + linux + + + %s + + + + """ % (igroup, initiator) + elif 'igroup-create' == proxy: + proxy_body = '' + elif 'igroup-add' == proxy: + proxy_body = '' + elif 'lun-map-list-info' == proxy: + proxy_body = '' + elif 'lun-map' == proxy: + proxy_body = '0' + elif 'lun-unmap' == proxy: + proxy_body = '' + elif 'iscsi-portal-list-info' == proxy: + proxy_body = """ + + 1.2.3.4 + 3260 + 1000 + + """ + elif 'iscsi-node-get-name' == proxy: + target = 'iqn.1992-08.com.netapp:sn.111111111' + proxy_body = '%s' % target + else: + # Unknown proxy API + s.send_response(500) + s.end_headers + return + api = api + ':' + proxy + proxy_header = '' + proxy_trailer = """passed + """ + body = proxy_header + proxy_body + proxy_trailer + else: + # Unknown API + s.send_response(500) + s.end_headers + return + s.send_response(200) + s.send_header("Content-Type", "text/xml; charset=utf-8") + s.end_headers() + s.wfile.write(RESPONSE_PREFIX) + s.wfile.write(body) + s.wfile.write(RESPONSE_SUFFIX) + + +class FakeHttplibSocket(object): + """A fake socket implementation for httplib.HTTPResponse""" + def __init__(self, value): + self._rbuffer = StringIO.StringIO(value) + self._wbuffer = StringIO.StringIO('') + oldclose = self._wbuffer.close + + def newclose(): + self.result = self._wbuffer.getvalue() + oldclose() + self._wbuffer.close = newclose + + def makefile(self, mode, _other): + """Returns 
the socket's internal buffer""" + if mode == 'r' or mode == 'rb': + return self._rbuffer + if mode == 'w' or mode == 'wb': + return self._wbuffer + + +class FakeHTTPConnection(object): + """A fake httplib.HTTPConnection for netapp tests + + Requests made via this connection actually get translated and routed into + the fake Dfm handler above, we then turn the response into + the httplib.HTTPResponse that the caller expects. + """ + def __init__(self, host, timeout=None): + self.host = host + + def request(self, method, path, data=None, headers=None): + if not headers: + headers = {} + req_str = '%s %s HTTP/1.1\r\n' % (method, path) + for key, value in headers.iteritems(): + req_str += "%s: %s\r\n" % (key, value) + if data: + req_str += '\r\n%s' % data + + # NOTE(vish): normally the http transport normailizes from unicode + sock = FakeHttplibSocket(req_str.decode("latin-1").encode("utf-8")) + # NOTE(vish): stop the server from trying to look up address from + # the fake socket + FakeDfmServerHandler.address_string = lambda x: '127.0.0.1' + self.app = FakeDfmServerHandler(sock, '127.0.0.1:8088', None) + + self.sock = FakeHttplibSocket(sock.result) + self.http_response = httplib.HTTPResponse(self.sock) + + def set_debuglevel(self, level): + pass + + def getresponse(self): + self.http_response.begin() + return self.http_response + + def getresponsebody(self): + return self.sock.result + + +class NetAppDriverTestCase(test.TestCase): + """Test case for NetAppISCSIDriver""" + STORAGE_SERVICE = 'Thin Provisioned Space for VMFS Datastores' + PROJECT_ID = 'testproj' + VOLUME_NAME = 'volume-00000001' + VOLUME_SIZE = 2147483648L # 2 GB + INITIATOR = 'iqn.1993-08.org.debian:01:23456789' + + def setUp(self): + super(NetAppDriverTestCase, self).setUp() + driver = netapp.NetAppISCSIDriver() + self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection) + driver._create_client('http://localhost:8088/dfm.wsdl', + 'root', 'password', 'localhost', 8088) + driver._set_storage_service(self.STORAGE_SERVICE) + self.driver = driver + + def test_connect(self): + self.driver.check_for_setup_error() + + def test_create_destroy(self): + self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID, + self.VOLUME_SIZE) + self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID) + + def test_map_unmap(self): + self.driver._provision(self.VOLUME_NAME, None, self.PROJECT_ID, + self.VOLUME_SIZE) + volume = {'name': self.VOLUME_NAME, 'project_id': self.PROJECT_ID, + 'id': 0, 'provider_auth': None} + updates = self.driver._get_export(volume) + self.assertTrue(updates['provider_location']) + volume['provider_location'] = updates['provider_location'] + connector = {'initiator': self.INITIATOR} + connection_info = self.driver.initialize_connection(volume, connector) + self.assertEqual(connection_info['driver_volume_type'], 'iscsi') + properties = connection_info['data'] + self.driver.terminate_connection(volume, connector) + self.driver._remove_destroy(self.VOLUME_NAME, self.PROJECT_ID) diff --git a/cinder/tests/test_nexenta.py b/cinder/tests/test_nexenta.py new file mode 100644 index 00000000000..5a33318b1c9 --- /dev/null +++ b/cinder/tests/test_nexenta.py @@ -0,0 +1,281 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit tests for OpenStack Cinder volume driver +""" + +import base64 +import urllib2 + +import cinder.flags +import cinder.test +from cinder.volume import nexenta +from cinder.volume.nexenta import volume +from cinder.volume.nexenta import jsonrpc + +FLAGS = cinder.flags.FLAGS + + +class TestNexentaDriver(cinder.test.TestCase): + TEST_VOLUME_NAME = 'volume1' + TEST_VOLUME_NAME2 = 'volume2' + TEST_SNAPSHOT_NAME = 'snapshot1' + TEST_VOLUME_REF = { + 'name': TEST_VOLUME_NAME, + 'size': 1, + } + TEST_VOLUME_REF2 = { + 'name': TEST_VOLUME_NAME2, + 'size': 1, + } + TEST_SNAPSHOT_REF = { + 'name': TEST_SNAPSHOT_NAME, + 'volume_name': TEST_VOLUME_NAME, + } + + def __init__(self, method): + super(TestNexentaDriver, self).__init__(method) + + def setUp(self): + super(TestNexentaDriver, self).setUp() + self.flags( + nexenta_host='1.1.1.1', + nexenta_volume='cinder', + nexenta_target_prefix='iqn:', + nexenta_target_group_prefix='cinder/', + nexenta_blocksize='8K', + nexenta_sparse=True, + ) + self.nms_mock = self.mox.CreateMockAnything() + for mod in ['volume', 'zvol', 'iscsitarget', + 'stmf', 'scsidisk', 'snapshot']: + setattr(self.nms_mock, mod, self.mox.CreateMockAnything()) + self.stubs.Set(jsonrpc, 'NexentaJSONProxy', + lambda *_, **__: self.nms_mock) + self.drv = volume.NexentaDriver() + self.drv.do_setup({}) + + def test_setup_error(self): + self.nms_mock.volume.object_exists('cinder').AndReturn(True) + self.mox.ReplayAll() + self.drv.check_for_setup_error() + + def test_setup_error_fail(self): + self.nms_mock.volume.object_exists('cinder').AndReturn(False) + self.mox.ReplayAll() + self.assertRaises(LookupError, self.drv.check_for_setup_error) + + def test_local_path(self): + self.assertRaises(NotImplementedError, self.drv.local_path, '') + + def test_create_volume(self): + self.nms_mock.zvol.create('cinder/volume1', '1G', '8K', True) + self.mox.ReplayAll() + self.drv.create_volume(self.TEST_VOLUME_REF) + + def test_delete_volume(self): + self.nms_mock.zvol.destroy('cinder/volume1', '') + self.mox.ReplayAll() + self.drv.delete_volume(self.TEST_VOLUME_REF) + + def test_create_snapshot(self): + self.nms_mock.zvol.create_snapshot('cinder/volume1', 'snapshot1', '') + self.mox.ReplayAll() + self.drv.create_snapshot(self.TEST_SNAPSHOT_REF) + + def test_create_volume_from_snapshot(self): + self.nms_mock.zvol.clone('cinder/volume1@snapshot1', 'cinder/volume2') + self.mox.ReplayAll() + self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF2, + self.TEST_SNAPSHOT_REF) + + def test_delete_snapshot(self): + self.nms_mock.snapshot.destroy('cinder/volume1@snapshot1', '') + self.mox.ReplayAll() + self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) + + _CREATE_EXPORT_METHODS = [ + ('iscsitarget', 'create_target', ({'target_name': 'iqn:volume1'},), + u'Unable to create iscsi target\n' + u' iSCSI target iqn.1986-03.com.sun:02:cinder-volume1 already' + u' configured\n' + u' itadm create-target failed with error 17\n', + ), + ('stmf', 'create_targetgroup', ('cinder/volume1',), + u'Unable to create targetgroup: stmfadm: cinder/volume1:' + u' already exists\n', + ), + ('stmf', 'add_targetgroup_member', 
('cinder/volume1', 'iqn:volume1'), + u'Unable to add member to targetgroup: stmfadm:' + u' iqn.1986-03.com.sun:02:cinder-volume1: already exists\n', + ), + ('scsidisk', 'create_lu', ('cinder/volume1', {}), + u"Unable to create lu with zvol 'cinder/volume1':\n" + u" sbdadm: filename /dev/zvol/rdsk/cinder/volume1: in use\n", + ), + ('scsidisk', 'add_lun_mapping_entry', ('cinder/volume1', { + 'target_group': 'cinder/volume1', 'lun': '0'}), + u"Unable to add view to zvol 'cinder/volume1' (LUNs in use: ):\n" + u" stmfadm: view entry exists\n", + ), + ] + + def _stub_export_method(self, module, method, args, error, fail=False): + m = getattr(self.nms_mock, module) + m = getattr(m, method) + mock = m(*args) + if fail: + mock.AndRaise(nexenta.NexentaException(error)) + + def _stub_all_export_methods(self, fail=False): + for params in self._CREATE_EXPORT_METHODS: + self._stub_export_method(*params, fail=fail) + + def test_create_export(self): + self._stub_all_export_methods() + self.mox.ReplayAll() + retval = self.drv.create_export({}, self.TEST_VOLUME_REF) + self.assertEquals(retval, + {'provider_location': + '%s:%s,1 %s%s' % (FLAGS.nexenta_host, + FLAGS.nexenta_iscsi_target_portal_port, + FLAGS.nexenta_target_prefix, + self.TEST_VOLUME_NAME)}) + + def __get_test(i): + def _test_create_export_fail(self): + for params in self._CREATE_EXPORT_METHODS[:i]: + self._stub_export_method(*params) + self._stub_export_method(*self._CREATE_EXPORT_METHODS[i], + fail=True) + self.mox.ReplayAll() + self.assertRaises(nexenta.NexentaException, + self.drv.create_export, {}, self.TEST_VOLUME_REF) + return _test_create_export_fail + + for i in range(len(_CREATE_EXPORT_METHODS)): + locals()['test_create_export_fail_%d' % i] = __get_test(i) + + def test_ensure_export(self): + self._stub_all_export_methods(fail=True) + self.mox.ReplayAll() + self.drv.ensure_export({}, self.TEST_VOLUME_REF) + + def test_remove_export(self): + self.nms_mock.scsidisk.delete_lu('cinder/volume1') + self.nms_mock.stmf.destroy_targetgroup('cinder/volume1') + self.nms_mock.iscsitarget.delete_target('iqn:volume1') + self.mox.ReplayAll() + self.drv.remove_export({}, self.TEST_VOLUME_REF) + + def test_remove_export_fail_0(self): + self.nms_mock.scsidisk.delete_lu('cinder/volume1') + self.nms_mock.stmf.destroy_targetgroup('cinder/volume1').AndRaise( + nexenta.NexentaException()) + self.nms_mock.iscsitarget.delete_target('iqn:volume1') + self.mox.ReplayAll() + self.drv.remove_export({}, self.TEST_VOLUME_REF) + + def test_remove_export_fail_1(self): + self.nms_mock.scsidisk.delete_lu('cinder/volume1') + self.nms_mock.stmf.destroy_targetgroup('cinder/volume1') + self.nms_mock.iscsitarget.delete_target('iqn:volume1').AndRaise( + nexenta.NexentaException()) + self.mox.ReplayAll() + self.drv.remove_export({}, self.TEST_VOLUME_REF) + + +class TestNexentaJSONRPC(cinder.test.TestCase): + URL = 'http://example.com/' + URL_S = 'https://example.com/' + USER = 'user' + PASSWORD = 'password' + HEADERS = {'Authorization': 'Basic %s' % (base64.b64encode( + ':'.join((USER, PASSWORD))),), + 'Content-Type': 'application/json'} + REQUEST = 'the request' + + def setUp(self): + super(TestNexentaJSONRPC, self).setUp() + self.proxy = jsonrpc.NexentaJSONProxy( + self.URL, self.USER, self.PASSWORD, auto=True) + self.mox.StubOutWithMock(urllib2, 'Request', True) + self.mox.StubOutWithMock(urllib2, 'urlopen') + self.resp_mock = self.mox.CreateMockAnything() + self.resp_info_mock = self.mox.CreateMockAnything() + self.resp_mock.info().AndReturn(self.resp_info_mock) + 
urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock) + + def test_call(self): + urllib2.Request(self.URL, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + self.resp_info_mock.status = '' + self.resp_mock.read().AndReturn( + '{"error": null, "result": "the result"}') + self.mox.ReplayAll() + result = self.proxy('arg1', 'arg2') + self.assertEquals("the result", result) + + def test_call_deep(self): + urllib2.Request(self.URL, + '{"object": "obj1.subobj", "params": ["arg1", "arg2"],' + ' "method": "meth"}', + self.HEADERS).AndReturn(self.REQUEST) + self.resp_info_mock.status = '' + self.resp_mock.read().AndReturn( + '{"error": null, "result": "the result"}') + self.mox.ReplayAll() + result = self.proxy.obj1.subobj.meth('arg1', 'arg2') + self.assertEquals("the result", result) + + def test_call_auto(self): + urllib2.Request(self.URL, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + urllib2.Request(self.URL_S, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + self.resp_info_mock.status = 'EOF in headers' + self.resp_mock.read().AndReturn( + '{"error": null, "result": "the result"}') + urllib2.urlopen(self.REQUEST).AndReturn(self.resp_mock) + self.mox.ReplayAll() + result = self.proxy('arg1', 'arg2') + self.assertEquals("the result", result) + + def test_call_error(self): + urllib2.Request(self.URL, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + self.resp_info_mock.status = '' + self.resp_mock.read().AndReturn( + '{"error": {"message": "the error"}, "result": "the result"}') + self.mox.ReplayAll() + self.assertRaises(jsonrpc.NexentaJSONException, + self.proxy, 'arg1', 'arg2') + + def test_call_fail(self): + urllib2.Request(self.URL, + '{"object": null, "params": ["arg1", "arg2"], "method": null}', + self.HEADERS).AndReturn(self.REQUEST) + self.resp_info_mock.status = 'EOF in headers' + self.proxy.auto = False + self.mox.ReplayAll() + self.assertRaises(jsonrpc.NexentaJSONException, + self.proxy, 'arg1', 'arg2') diff --git a/cinder/tests/test_notifier.py b/cinder/tests/test_notifier.py new file mode 100644 index 00000000000..e147f786c70 --- /dev/null +++ b/cinder/tests/test_notifier.py @@ -0,0 +1,133 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
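Among the cases below, test_send_notification_by_decorator wraps an ordinary function so each call also emits an event. A stripped-down sketch of that decorator pattern; the notify callable, priority, and payload shape here are placeholders, not cinder.notifier's exact interface:

import functools


def notify_decorator(name, fn, notify):
    """Return fn wrapped so every successful call reports an event."""
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        result = fn(*args, **kwargs)
        notify('publisher_id', name, 'INFO',
               {'args': args, 'kwargs': kwargs})
        return result
    return wrapped

In the test, cinder.notifier.api.notify_decorator plays this role and the stubbed no_op_notifier observes the resulting call.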
+ +import cinder +from cinder import flags +from cinder import log +import cinder.notifier.no_op_notifier +from cinder.notifier import api as notifier_api +from cinder import test + + +class NotifierTestCase(test.TestCase): + """Test case for notifications""" + def setUp(self): + super(NotifierTestCase, self).setUp() + self.flags(notification_driver='cinder.notifier.no_op_notifier') + + def test_send_notification(self): + self.notify_called = False + + def mock_notify(cls, *args): + self.notify_called = True + + self.stubs.Set(cinder.notifier.no_op_notifier, 'notify', + mock_notify) + + notifier_api.notify('publisher_id', 'event_type', + cinder.notifier.api.WARN, dict(a=3)) + self.assertEqual(self.notify_called, True) + + def test_verify_message_format(self): + """A test to ensure changing the message format is prohibitively + annoying""" + + def message_assert(message): + fields = [('publisher_id', 'publisher_id'), + ('event_type', 'event_type'), + ('priority', 'WARN'), + ('payload', dict(a=3))] + for k, v in fields: + self.assertEqual(message[k], v) + self.assertTrue(len(message['message_id']) > 0) + self.assertTrue(len(message['timestamp']) > 0) + + self.stubs.Set(cinder.notifier.no_op_notifier, 'notify', + message_assert) + notifier_api.notify('publisher_id', 'event_type', + cinder.notifier.api.WARN, dict(a=3)) + + def test_send_rabbit_notification(self): + self.stubs.Set(cinder.flags.FLAGS, 'notification_driver', + 'cinder.notifier.rabbit_notifier') + self.mock_notify = False + + def mock_notify(cls, *args): + self.mock_notify = True + + self.stubs.Set(cinder.rpc, 'notify', mock_notify) + notifier_api.notify('publisher_id', 'event_type', + cinder.notifier.api.WARN, dict(a=3)) + + self.assertEqual(self.mock_notify, True) + + def test_invalid_priority(self): + self.assertRaises(cinder.notifier.api.BadPriorityException, + notifier_api.notify, 'publisher_id', + 'event_type', 'not a priority', dict(a=3)) + + def test_rabbit_priority_queue(self): + flags.DECLARE('notification_topics', 'cinder.notifier.rabbit_notifier') + self.stubs.Set(cinder.flags.FLAGS, 'notification_driver', + 'cinder.notifier.rabbit_notifier') + self.stubs.Set(cinder.flags.FLAGS, 'notification_topics', + ['testnotify', ]) + + self.test_topic = None + + def mock_notify(context, topic, msg): + self.test_topic = topic + + self.stubs.Set(cinder.rpc, 'notify', mock_notify) + notifier_api.notify('publisher_id', 'event_type', 'DEBUG', dict(a=3)) + self.assertEqual(self.test_topic, 'testnotify.debug') + + def test_error_notification(self): + self.stubs.Set(cinder.flags.FLAGS, 'notification_driver', + 'cinder.notifier.rabbit_notifier') + self.stubs.Set(cinder.flags.FLAGS, 'publish_errors', True) + LOG = log.getLogger('cinder') + log.setup() + msgs = [] + + def mock_notify(context, topic, data): + msgs.append(data) + + self.stubs.Set(cinder.rpc, 'notify', mock_notify) + LOG.error('foo') + self.assertEqual(1, len(msgs)) + msg = msgs[0] + self.assertEqual(msg['event_type'], 'error_notification') + self.assertEqual(msg['priority'], 'ERROR') + self.assertEqual(msg['payload']['error'], 'foo') + + def test_send_notification_by_decorator(self): + self.notify_called = False + + def example_api(arg1, arg2): + return arg1 + arg2 + + example_api = cinder.notifier.api.notify_decorator( + 'example_api', + example_api) + + def mock_notify(cls, *args): + self.notify_called = True + + self.stubs.Set(cinder.notifier.no_op_notifier, 'notify', + mock_notify) + + self.assertEqual(3, example_api(1, 2)) + self.assertEqual(self.notify_called, True) 
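The next file exercises cinder's rootwrap, which decides whether an unprivileged command line may be re-run as root by matching it against an allow-list of filters. The essence of the regular-expression flavor, sketched independently of cinder's filters module (MiniRegExpFilter is a name local to this sketch):

import re


class MiniRegExpFilter(object):
    """Accept a command only when every argument matches its pattern."""
    def __init__(self, exec_path, *arg_patterns):
        self.exec_path = exec_path
        self.arg_patterns = arg_patterns

    def match(self, usercmd):
        if len(usercmd) != len(self.arg_patterns):
            return False
        return all(re.match(pat + '$', str(arg))
                   for pat, arg in zip(self.arg_patterns, usercmd))

    def get_command(self, usercmd):
        # Re-issue the command with a trusted absolute executable path.
        return [self.exec_path] + [str(a) for a in usercmd[1:]]


f = MiniRegExpFilter('/bin/ls', 'ls', '/[a-z]+')
assert f.match(['ls', '/root'])
assert not f.match(['ls', 'root'])
assert f.get_command(['ls', '/root']) == ['/bin/ls', '/root']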
diff --git a/cinder/tests/test_nova_rootwrap.py b/cinder/tests/test_nova_rootwrap.py new file mode 100644 index 00000000000..42fd5ee5f84 --- /dev/null +++ b/cinder/tests/test_nova_rootwrap.py @@ -0,0 +1,133 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import subprocess + +from cinder.rootwrap import filters +from cinder.rootwrap import wrapper +from cinder import test + + +class RootwrapTestCase(test.TestCase): + + def setUp(self): + super(RootwrapTestCase, self).setUp() + self.filters = [ + filters.RegExpFilter("/bin/ls", "root", 'ls', '/[a-z]+'), + filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"), + filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'), + filters.CommandFilter("/nonexistant/cat", "root"), + filters.CommandFilter("/bin/cat", "root") # Keep this one last + ] + + def test_RegExpFilter_match(self): + usercmd = ["ls", "/root"] + filtermatch = wrapper.match_filter(self.filters, usercmd) + self.assertFalse(filtermatch is None) + self.assertEqual(filtermatch.get_command(usercmd), + ["/bin/ls", "/root"]) + + def test_RegExpFilter_reject(self): + usercmd = ["ls", "root"] + filtermatch = wrapper.match_filter(self.filters, usercmd) + self.assertTrue(filtermatch is None) + + def test_missing_command(self): + valid_but_missing = ["foo_bar_not_exist"] + invalid = ["foo_bar_not_exist_and_not_matched"] + filtermatch = wrapper.match_filter(self.filters, valid_but_missing) + self.assertTrue(filtermatch is not None) + filtermatch = wrapper.match_filter(self.filters, invalid) + self.assertTrue(filtermatch is None) + + def test_DnsmasqFilter(self): + usercmd = ['FLAGFILE=A', 'NETWORK_ID=foobar', 'dnsmasq', 'foo'] + f = filters.DnsmasqFilter("/usr/bin/dnsmasq", "root") + self.assertTrue(f.match(usercmd)) + self.assertEqual(f.get_command(usercmd), ['/usr/bin/dnsmasq', 'foo']) + env = f.get_environment(usercmd) + self.assertEqual(env.get('FLAGFILE'), 'A') + self.assertEqual(env.get('NETWORK_ID'), 'foobar') + + @test.skip_if(not os.path.exists("/proc/%d" % os.getpid()), + "Test requires /proc filesystem (procfs)") + def test_KillFilter(self): + p = subprocess.Popen(["/bin/sleep", "5"]) + f = filters.KillFilter("/bin/kill", "root", + ["-ALRM"], + ["/bin/sleep"]) + usercmd = ['kill', '-9', p.pid] + # Incorrect signal should fail + self.assertFalse(f.match(usercmd)) + usercmd = ['kill', p.pid] + # Providing no signal should fail + self.assertFalse(f.match(usercmd)) + + f = filters.KillFilter("/bin/kill", "root", + ["-9", ""], + ["/bin/sleep"]) + usercmd = ['kill', '-9', os.getpid()] + # Our own PID does not match /bin/sleep, so it should fail + self.assertFalse(f.match(usercmd)) + usercmd = ['kill', '-9', 999999] + # Nonexistant PID should fail + self.assertFalse(f.match(usercmd)) + usercmd = ['kill', p.pid] + # Providing no signal should work + self.assertTrue(f.match(usercmd)) + usercmd = ['kill', '-9', p.pid] + # Providing -9 signal should work + self.assertTrue(f.match(usercmd)) + + def 
test_KillFilter_no_raise(self): + """Makes sure ValueError from bug 926412 is gone""" + f = filters.KillFilter("/bin/kill", "root", [""]) + # Providing anything other than kill should be False + usercmd = ['notkill', 999999] + self.assertFalse(f.match(usercmd)) + # Providing something that is not a pid should be False + usercmd = ['kill', 'notapid'] + self.assertFalse(f.match(usercmd)) + + def test_KillFilter_deleted_exe(self): + """Makes sure deleted exe's are killed correctly""" + # See bug #967931. + def fake_readlink(blah): + return '/bin/commandddddd (deleted)' + + f = filters.KillFilter("/bin/kill", "root", + [""], + ["/bin/commandddddd"]) + usercmd = ['kill', 1234] + # Providing no signal should work + self.stubs.Set(os, 'readlink', fake_readlink) + self.assertTrue(f.match(usercmd)) + + def test_ReadFileFilter(self): + goodfn = '/good/file.name' + f = filters.ReadFileFilter(goodfn) + usercmd = ['cat', '/bad/file'] + self.assertFalse(f.match(['cat', '/bad/file'])) + usercmd = ['cat', goodfn] + self.assertEqual(f.get_command(usercmd), ['/bin/cat', goodfn]) + self.assertTrue(f.match(usercmd)) + + def test_skips(self): + # Check that all filters are skipped and that the last matches + usercmd = ["cat", "/"] + filtermatch = wrapper.match_filter(self.filters, usercmd) + self.assertTrue(filtermatch is self.filters[-1]) diff --git a/cinder/tests/test_policy.py b/cinder/tests/test_policy.py new file mode 100644 index 00000000000..4cb66f08811 --- /dev/null +++ b/cinder/tests/test_policy.py @@ -0,0 +1,189 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Piston Cloud Computing, Inc. +# All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
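The rule tables below are lists of lists: the outer level is OR-ed together, each inner list is AND-ed. A toy evaluator for that shape, which only understands role checks (cinder's real brain additionally supports http checks, template checks like project_id:%(project_id)s, and a configurable default rule):

def _check(atom, creds):
    kind, _, value = atom.partition(':')
    if kind == 'role':
        return value.lower() in [r.lower() for r in creds.get('roles', [])]
    return False  # unknown kinds, e.g. 'false:false', never match


def enforce(rules, action, creds):
    alternatives = rules.get(action)
    if alternatives is None:
        return False  # unknown action: deny
    if not alternatives:
        return True   # empty rule list: always allowed
    return any(all(_check(a, creds) for a in alt) for alt in alternatives)


rules = {"example:allowed": [],
         "example:denied": [["false:false"]],
         "example:admin": [["role:admin"], ["role:sysadmin"]]}
assert enforce(rules, "example:allowed", {"roles": ["member"]})
assert not enforce(rules, "example:denied", {"roles": ["member"]})
assert enforce(rules, "example:admin", {"roles": ["AdMiN"]})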
+ +"""Test of Policy Engine For Cinder""" + +import os.path +import StringIO +import urllib2 + +from cinder.common import policy as common_policy +from cinder import context +from cinder import exception +from cinder import flags +import cinder.common.policy +from cinder import policy +from cinder import test +from cinder import utils + +FLAGS = flags.FLAGS + + +class PolicyFileTestCase(test.TestCase): + def setUp(self): + super(PolicyFileTestCase, self).setUp() + policy.reset() + self.context = context.RequestContext('fake', 'fake') + self.target = {} + + def tearDown(self): + super(PolicyFileTestCase, self).tearDown() + policy.reset() + + def test_modified_policy_reloads(self): + with utils.tempdir() as tmpdir: + tmpfilename = os.path.join(tmpdir, 'policy') + self.flags(policy_file=tmpfilename) + + action = "example:test" + with open(tmpfilename, "w") as policyfile: + policyfile.write("""{"example:test": []}""") + policy.enforce(self.context, action, self.target) + with open(tmpfilename, "w") as policyfile: + policyfile.write("""{"example:test": ["false:false"]}""") + # NOTE(vish): reset stored policy cache so we don't have to + # sleep(1) + policy._POLICY_CACHE = {} + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + +class PolicyTestCase(test.TestCase): + def setUp(self): + super(PolicyTestCase, self).setUp() + policy.reset() + # NOTE(vish): preload rules to circumvent reloading from file + policy.init() + rules = { + "true": [], + "example:allowed": [], + "example:denied": [["false:false"]], + "example:get_http": [["http:http://www.example.com"]], + "example:my_file": [["role:compute_admin"], + ["project_id:%(project_id)s"]], + "example:early_and_fail": [["false:false", "rule:true"]], + "example:early_or_success": [["rule:true"], ["false:false"]], + "example:lowercase_admin": [["role:admin"], ["role:sysadmin"]], + "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]], + } + # NOTE(vish): then overload underlying brain + common_policy.set_brain(common_policy.HttpBrain(rules)) + self.context = context.RequestContext('fake', 'fake', roles=['member']) + self.target = {} + + def tearDown(self): + policy.reset() + super(PolicyTestCase, self).tearDown() + + def test_enforce_nonexistent_action_throws(self): + action = "example:noexist" + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_enforce_bad_action_throws(self): + action = "example:denied" + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_enforce_good_action(self): + action = "example:allowed" + policy.enforce(self.context, action, self.target) + + def test_enforce_http_true(self): + + def fakeurlopen(url, post_data): + return StringIO.StringIO("True") + self.stubs.Set(urllib2, 'urlopen', fakeurlopen) + action = "example:get_http" + target = {} + result = policy.enforce(self.context, action, target) + self.assertEqual(result, None) + + def test_enforce_http_false(self): + + def fakeurlopen(url, post_data): + return StringIO.StringIO("False") + self.stubs.Set(urllib2, 'urlopen', fakeurlopen) + action = "example:get_http" + target = {} + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, target) + + def test_templatized_enforcement(self): + target_mine = {'project_id': 'fake'} + target_not_mine = {'project_id': 'another'} + action = "example:my_file" + policy.enforce(self.context, action, target_mine) + 
self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, target_not_mine) + + def test_early_AND_enforcement(self): + action = "example:early_and_fail" + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, action, self.target) + + def test_early_OR_enforcement(self): + action = "example:early_or_success" + policy.enforce(self.context, action, self.target) + + def test_ignore_case_role_check(self): + lowercase_action = "example:lowercase_admin" + uppercase_action = "example:uppercase_admin" + # NOTE(dprince) we mix case in the Admin role here to ensure + # case is ignored + admin_context = context.RequestContext('admin', + 'fake', + roles=['AdMiN']) + policy.enforce(admin_context, lowercase_action, self.target) + policy.enforce(admin_context, uppercase_action, self.target) + + +class DefaultPolicyTestCase(test.TestCase): + + def setUp(self): + super(DefaultPolicyTestCase, self).setUp() + policy.reset() + policy.init() + + self.rules = { + "default": [], + "example:exist": [["false:false"]] + } + + self._set_brain('default') + + self.context = context.RequestContext('fake', 'fake') + + def _set_brain(self, default_rule): + brain = cinder.common.policy.HttpBrain(self.rules, default_rule) + cinder.common.policy.set_brain(brain) + + def tearDown(self): + super(DefaultPolicyTestCase, self).tearDown() + policy.reset() + + def test_policy_called(self): + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, "example:exist", {}) + + def test_not_found_policy_calls_default(self): + policy.enforce(self.context, "example:noexist", {}) + + def test_default_not_found(self): + self._set_brain("default_noexist") + self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, + self.context, "example:noexist", {}) diff --git a/cinder/tests/test_quota.py b/cinder/tests/test_quota.py new file mode 100644 index 00000000000..6e794d6e4e7 --- /dev/null +++ b/cinder/tests/test_quota.py @@ -0,0 +1,316 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
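+ +# Quota resolution as these tests exercise it: flag-defined defaults come +# first, then per-quota-class values (when the context carries a quota_class), +# then per-project overrides; a value of -1 at any layer means unlimited.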
+ +from cinder import context +from cinder import db +from cinder import flags +from cinder import quota +from cinder import exception +from cinder import rpc +from cinder import test +from cinder import volume +from cinder.scheduler import driver as scheduler_driver + + +FLAGS = flags.FLAGS + + +class GetQuotaTestCase(test.TestCase): + def setUp(self): + super(GetQuotaTestCase, self).setUp() + self.flags(quota_instances=10, + quota_cores=20, + quota_ram=50 * 1024, + quota_volumes=10, + quota_gigabytes=1000, + quota_floating_ips=10, + quota_security_groups=10, + quota_security_group_rules=20, + quota_metadata_items=128, + quota_injected_files=5, + quota_injected_file_content_bytes=10 * 1024) + self.context = context.RequestContext('admin', 'admin', is_admin=True) + + def _stub_class(self): + def fake_quota_class_get_all_by_name(context, quota_class): + result = dict(class_name=quota_class) + if quota_class == 'test_class': + result.update( + instances=5, + cores=10, + ram=25 * 1024, + volumes=5, + gigabytes=500, + floating_ips=5, + quota_security_groups=10, + quota_security_group_rules=20, + metadata_items=64, + injected_files=2, + injected_file_content_bytes=5 * 1024, + invalid_quota=100, + ) + return result + + self.stubs.Set(db, 'quota_class_get_all_by_name', + fake_quota_class_get_all_by_name) + + def _stub_project(self, override=False): + def fake_quota_get_all_by_project(context, project_id): + result = dict(project_id=project_id) + if override: + result.update( + instances=2, + cores=5, + ram=12 * 1024, + volumes=2, + gigabytes=250, + floating_ips=2, + security_groups=5, + security_group_rules=10, + metadata_items=32, + injected_files=1, + injected_file_content_bytes=2 * 1024, + invalid_quota=50, + ) + return result + + self.stubs.Set(db, 'quota_get_all_by_project', + fake_quota_get_all_by_project) + + def test_default_quotas(self): + result = quota._get_default_quotas() + self.assertEqual(result, dict( + instances=10, + cores=20, + ram=50 * 1024, + volumes=10, + gigabytes=1000, + floating_ips=10, + security_groups=10, + security_group_rules=20, + metadata_items=128, + injected_files=5, + injected_file_content_bytes=10 * 1024, + )) + + def test_default_quotas_unlimited(self): + self.flags(quota_instances=-1, + quota_cores=-1, + quota_ram=-1, + quota_volumes=-1, + quota_gigabytes=-1, + quota_floating_ips=-1, + quota_security_groups=-1, + quota_security_group_rules=-1, + quota_metadata_items=-1, + quota_injected_files=-1, + quota_injected_file_content_bytes=-1) + result = quota._get_default_quotas() + self.assertEqual(result, dict( + instances=-1, + cores=-1, + ram=-1, + volumes=-1, + gigabytes=-1, + floating_ips=-1, + security_groups=-1, + security_group_rules=-1, + metadata_items=-1, + injected_files=-1, + injected_file_content_bytes=-1, + )) + + def test_class_quotas_noclass(self): + self._stub_class() + result = quota.get_class_quotas(self.context, 'noclass') + self.assertEqual(result, dict( + instances=10, + cores=20, + ram=50 * 1024, + volumes=10, + gigabytes=1000, + floating_ips=10, + security_groups=10, + security_group_rules=20, + metadata_items=128, + injected_files=5, + injected_file_content_bytes=10 * 1024, + )) + + def test_class_quotas(self): + self._stub_class() + result = quota.get_class_quotas(self.context, 'test_class') + self.assertEqual(result, dict( + instances=5, + cores=10, + ram=25 * 1024, + volumes=5, + gigabytes=500, + floating_ips=5, + security_groups=10, + security_group_rules=20, + metadata_items=64, + injected_files=2, + injected_file_content_bytes=5 * 
1024, + )) + + def test_project_quotas_defaults_noclass(self): + self._stub_class() + self._stub_project() + result = quota.get_project_quotas(self.context, 'admin') + self.assertEqual(result, dict( + instances=10, + cores=20, + ram=50 * 1024, + volumes=10, + gigabytes=1000, + floating_ips=10, + security_groups=10, + security_group_rules=20, + metadata_items=128, + injected_files=5, + injected_file_content_bytes=10 * 1024, + )) + + def test_project_quotas_overrides_noclass(self): + self._stub_class() + self._stub_project(True) + result = quota.get_project_quotas(self.context, 'admin') + self.assertEqual(result, dict( + instances=2, + cores=5, + ram=12 * 1024, + volumes=2, + gigabytes=250, + floating_ips=2, + security_groups=5, + security_group_rules=10, + metadata_items=32, + injected_files=1, + injected_file_content_bytes=2 * 1024, + )) + + def test_project_quotas_defaults_withclass(self): + self._stub_class() + self._stub_project() + self.context.quota_class = 'test_class' + result = quota.get_project_quotas(self.context, 'admin') + self.assertEqual(result, dict( + instances=5, + cores=10, + ram=25 * 1024, + volumes=5, + gigabytes=500, + floating_ips=5, + security_groups=10, + security_group_rules=20, + metadata_items=64, + injected_files=2, + injected_file_content_bytes=5 * 1024, + )) + + def test_project_quotas_overrides_withclass(self): + self._stub_class() + self._stub_project(True) + self.context.quota_class = 'test_class' + result = quota.get_project_quotas(self.context, 'admin') + self.assertEqual(result, dict( + instances=2, + cores=5, + ram=12 * 1024, + volumes=2, + gigabytes=250, + floating_ips=2, + security_groups=5, + security_group_rules=10, + metadata_items=32, + injected_files=1, + injected_file_content_bytes=2 * 1024, + )) + + +class QuotaTestCase(test.TestCase): + + class StubImageService(object): + + def show(self, *args, **kwargs): + return {"properties": {}} + + def setUp(self): + super(QuotaTestCase, self).setUp() + self.flags(quota_volumes=2, + quota_gigabytes=20) + self.user_id = 'admin' + self.project_id = 'admin' + self.context = context.RequestContext(self.user_id, + self.project_id, + is_admin=True) + orig_rpc_call = rpc.call + + def rpc_call_wrapper(context, topic, msg): + """Stub out the scheduler creating the instance entry""" + if (topic == FLAGS.scheduler_topic and + msg['method'] == 'run_instance'): + scheduler = scheduler_driver.Scheduler + instance = scheduler().create_instance_db_entry( + context, + msg['args']['request_spec']) + return [scheduler_driver.encode_instance(instance)] + else: + return orig_rpc_call(context, topic, msg) + + self.stubs.Set(rpc, 'call', rpc_call_wrapper) + + def _create_volume(self, size=10): + """Create a test volume""" + vol = {} + vol['user_id'] = self.user_id + vol['project_id'] = self.project_id + vol['size'] = size + return db.volume_create(self.context, vol)['id'] + + def test_unlimited_volumes(self): + self.flags(quota_volumes=10, quota_gigabytes=-1) + volumes = quota.allowed_volumes(self.context, 100, 1) + self.assertEqual(volumes, 10) + db.quota_create(self.context, self.project_id, 'volumes', -1) + volumes = quota.allowed_volumes(self.context, 100, 1) + self.assertEqual(volumes, 100) + volumes = quota.allowed_volumes(self.context, 101, 1) + self.assertEqual(volumes, 101) + + def test_too_many_volumes(self): + volume_ids = [] + for i in range(FLAGS.quota_volumes): + volume_id = self._create_volume() + volume_ids.append(volume_id) + self.assertRaises(exception.QuotaError, + volume.API().create, + self.context, 10, 
'', '', None) + for volume_id in volume_ids: + db.volume_destroy(self.context, volume_id) + + def test_too_many_gigabytes(self): + volume_ids = [] + volume_id = self._create_volume(size=20) + volume_ids.append(volume_id) + self.assertRaises(exception.QuotaError, + volume.API().create, + self.context, 10, '', '', None) + for volume_id in volume_ids: + db.volume_destroy(self.context, volume_id) diff --git a/cinder/tests/test_service.py b/cinder/tests/test_service.py new file mode 100644 index 00000000000..c0c2561e43a --- /dev/null +++ b/cinder/tests/test_service.py @@ -0,0 +1,221 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unit Tests for remote procedure calls using queue +""" + +import mox + +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder.openstack.common import cfg +from cinder import test +from cinder import service +from cinder import manager +from cinder import wsgi + + +test_service_opts = [ + cfg.StrOpt("fake_manager", + default="cinder.tests.test_service.FakeManager", + help="Manager for testing"), + cfg.StrOpt("test_service_listen", + default=None, + help="Host to bind test service to"), + cfg.IntOpt("test_service_listen_port", + default=0, + help="Port number to bind test service to"), + ] + +flags.FLAGS.register_opts(test_service_opts) + + +class FakeManager(manager.Manager): + """Fake manager for tests""" + def test_method(self): + return 'manager' + + +class ExtendedService(service.Service): + def test_method(self): + return 'service' + + +class ServiceManagerTestCase(test.TestCase): + """Test cases for Services""" + + def test_message_gets_to_manager(self): + serv = service.Service('test', + 'test', + 'test', + 'cinder.tests.test_service.FakeManager') + serv.start() + self.assertEqual(serv.test_method(), 'manager') + + def test_override_manager_method(self): + serv = ExtendedService('test', + 'test', + 'test', + 'cinder.tests.test_service.FakeManager') + serv.start() + self.assertEqual(serv.test_method(), 'service') + + +class ServiceFlagsTestCase(test.TestCase): + def test_service_enabled_on_create_based_on_flag(self): + self.flags(enable_new_services=True) + host = 'foo' + binary = 'cinder-fake' + app = service.Service.create(host=host, binary=binary) + app.start() + app.stop() + ref = db.service_get(context.get_admin_context(), app.service_id) + db.service_destroy(context.get_admin_context(), app.service_id) + self.assert_(not ref['disabled']) + + def test_service_disabled_on_create_based_on_flag(self): + self.flags(enable_new_services=False) + host = 'foo' + binary = 'cinder-fake' + app = service.Service.create(host=host, binary=binary) + app.start() + app.stop() + ref = db.service_get(context.get_admin_context(), app.service_id) + db.service_destroy(context.get_admin_context(), 
app.service_id) + self.assert_(ref['disabled']) + + +class ServiceTestCase(test.TestCase): + """Test cases for Services""" + + def setUp(self): + super(ServiceTestCase, self).setUp() + self.mox.StubOutWithMock(service, 'db') + + def test_create(self): + host = 'foo' + binary = 'cinder-fake' + topic = 'fake' + + # NOTE(vish): Create was moved out of mox replay to make sure that + # the looping calls are created in StartService. + app = service.Service.create(host=host, binary=binary, topic=topic) + + self.assert_(app) + + def test_report_state_newly_disconnected(self): + host = 'foo' + binary = 'bar' + topic = 'test' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'cinder'} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'cinder', + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + service.db.service_get(mox.IgnoreArg(), + mox.IgnoreArg()).AndRaise(Exception()) + + self.mox.ReplayAll() + serv = service.Service(host, + binary, + topic, + 'cinder.tests.test_service.FakeManager') + serv.start() + serv.report_state() + self.assert_(serv.model_disconnected) + + def test_report_state_newly_connected(self): + host = 'foo' + binary = 'bar' + topic = 'test' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'cinder'} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'cinder', + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + service.db.service_get(mox.IgnoreArg(), + service_ref['id']).AndReturn(service_ref) + service.db.service_update(mox.IgnoreArg(), service_ref['id'], + mox.ContainsKeyValue('report_count', 1)) + + self.mox.ReplayAll() + serv = service.Service(host, + binary, + topic, + 'cinder.tests.test_service.FakeManager') + serv.start() + serv.model_disconnected = True + serv.report_state() + + self.assert_(not serv.model_disconnected) + + +class TestWSGIService(test.TestCase): + + def setUp(self): + super(TestWSGIService, self).setUp() + self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) + + def test_service_random_port(self): + test_service = service.WSGIService("test_service") + self.assertEquals(0, test_service.port) + test_service.start() + self.assertNotEqual(0, test_service.port) + test_service.stop() + + +class TestLauncher(test.TestCase): + + def setUp(self): + super(TestLauncher, self).setUp() + self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) + self.service = service.WSGIService("test_service") + + def test_launch_app(self): + self.assertEquals(0, self.service.port) + launcher = service.Launcher() + launcher.launch_server(self.service) + self.assertEquals(0, self.service.port) + launcher.stop() diff --git a/cinder/tests/test_skip_examples.py b/cinder/tests/test_skip_examples.py new file mode 100644 index 00000000000..2e51aef3421 --- /dev/null +++ b/cinder/tests/test_skip_examples.py @@ -0,0 +1,47 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import test + + +class ExampleSkipTestCase(test.TestCase): + test_counter = 0 + + @test.skip_test("Example usage of @test.skip_test()") + def test_skip_test_example(self): + self.fail("skip_test failed to work properly.") + + @test.skip_if(True, "Example usage of @test.skip_if()") + def test_skip_if_example(self): + self.fail("skip_if failed to work properly.") + + @test.skip_unless(False, "Example usage of @test.skip_unless()") + def test_skip_unless_example(self): + self.fail("skip_unless failed to work properly.") + + @test.skip_if(False, "This test case should never be skipped.") + def test_001_increase_test_counter(self): + ExampleSkipTestCase.test_counter += 1 + + @test.skip_unless(True, "This test case should never be skipped.") + def test_002_increase_test_counter(self): + ExampleSkipTestCase.test_counter += 1 + + def test_003_verify_test_counter(self): + self.assertEquals(ExampleSkipTestCase.test_counter, 2, + "Tests were not skipped appropriately") diff --git a/cinder/tests/test_test.py b/cinder/tests/test_test.py new file mode 100644 index 00000000000..8ff84c1ad1c --- /dev/null +++ b/cinder/tests/test_test.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for the testing base code.""" + +from cinder import rpc +from cinder import test + + +class IsolationTestCase(test.TestCase): + """Ensure that things are cleaned up after failed tests. + + These tests don't really do much here, but if isolation fails a bunch + of other tests should fail. + + """ + def test_service_isolation(self): + self.start_service('volume') + + def test_rpc_consumer_isolation(self): + class NeverCalled(object): + + def __getattribute__(*args): + assert False, "I should never get called." 
+ + connection = rpc.create_connection(new=True) + proxy = NeverCalled() + connection.create_consumer('volume', proxy, fanout=False) + connection.consume_in_thread() diff --git a/cinder/tests/test_test_utils.py b/cinder/tests/test_test_utils.py new file mode 100644 index 00000000000..5e9063cc5a4 --- /dev/null +++ b/cinder/tests/test_test_utils.py @@ -0,0 +1,29 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import db +from cinder import test +from cinder.tests import utils as test_utils + + +class TestUtilsTestCase(test.TestCase): + def test_get_test_admin_context(self): + """get_test_admin_context's return value behaves like admin context""" + ctxt = test_utils.get_test_admin_context() + + # TODO(soren): This should verify the full interface context + # objects expose. + self.assertTrue(ctxt.is_admin) diff --git a/cinder/tests/test_utils.py b/cinder/tests/test_utils.py new file mode 100644 index 00000000000..ee946e7d9b3 --- /dev/null +++ b/cinder/tests/test_utils.py @@ -0,0 +1,1188 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import __builtin__ +import datetime +import hashlib +import os +import os.path +import socket +import shutil +import StringIO +import tempfile + +import eventlet +from eventlet import greenpool +import iso8601 +import lockfile +import mox + +import cinder +from cinder import exception +from cinder import flags +from cinder import test +from cinder import utils + + +FLAGS = flags.FLAGS + + +class ExecuteTestCase(test.TestCase): + def test_retry_on_failure(self): + fd, tmpfilename = tempfile.mkstemp() + _, tmpfilename2 = tempfile.mkstemp() + try: + fp = os.fdopen(fd, 'w+') + fp.write('''#!/bin/sh +# If stdin fails to get passed during one of the runs, make a note. +if ! grep -q foo +then + echo 'failure' > "$1" +fi +# If stdin has failed to get passed during this or a previous run, exit early. 
+if grep failure "$1" +then + exit 1 +fi +runs="$(cat $1)" +if [ -z "$runs" ] +then + runs=0 +fi +runs=$(($runs + 1)) +echo $runs > "$1" +exit 1 +''') + fp.close() + os.chmod(tmpfilename, 0755) + self.assertRaises(exception.ProcessExecutionError, + utils.execute, + tmpfilename, tmpfilename2, attempts=10, + process_input='foo', + delay_on_retry=False) + fp = open(tmpfilename2, 'r+') + runs = fp.read() + fp.close() + self.assertNotEquals(runs.strip(), 'failure', 'stdin did not ' + 'always get passed ' + 'correctly') + runs = int(runs.strip()) + self.assertEquals(runs, 10, + 'Ran %d times instead of 10.' % (runs,)) + finally: + os.unlink(tmpfilename) + os.unlink(tmpfilename2) + + def test_unknown_kwargs_raises_error(self): + self.assertRaises(exception.Error, + utils.execute, + '/usr/bin/env', 'true', + this_is_not_a_valid_kwarg=True) + + def test_check_exit_code_boolean(self): + utils.execute('/usr/bin/env', 'false', check_exit_code=False) + self.assertRaises(exception.ProcessExecutionError, + utils.execute, + '/usr/bin/env', 'false', check_exit_code=True) + + def test_no_retry_on_success(self): + fd, tmpfilename = tempfile.mkstemp() + _, tmpfilename2 = tempfile.mkstemp() + try: + fp = os.fdopen(fd, 'w+') + fp.write('''#!/bin/sh +# If we've already run, bail out. +grep -q foo "$1" && exit 1 +# Mark that we've run before. +echo foo > "$1" +# Check that stdin gets passed correctly. +grep foo +''') + fp.close() + os.chmod(tmpfilename, 0755) + utils.execute(tmpfilename, + tmpfilename2, + process_input='foo', + attempts=2) + finally: + os.unlink(tmpfilename) + os.unlink(tmpfilename2) + + +class GetFromPathTestCase(test.TestCase): + def test_tolerates_nones(self): + f = utils.get_from_path + + input = [] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [None] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': None}] + self.assertEquals([], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': None}}] + self.assertEquals([{'b': None}], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}] + self.assertEquals([{'b': {'c': None}}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': None}] + self.assertEquals([{'b': {'c': None}}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}] + self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a")) + self.assertEquals([{'c': None}], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + def test_does_select(self): + f = utils.get_from_path + + input = [{'a': 'a_1'}] + self.assertEquals(['a_1'], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': 'b_1'}}] + self.assertEquals([{'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 
'c_1'}}}, {'a': None}] + self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': None}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a")) + self.assertEquals([{'c': 'c_1'}], f(input, "a/b")) + self.assertEquals(['c_1'], f(input, "a/b/c")) + + input = [{'a': {'b': {'c': 'c_1'}}}, + {'a': {'b': {'c': 'c_2'}}}] + self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}], + f(input, "a")) + self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b")) + self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c")) + + self.assertEquals([], f(input, "a/b/c/d")) + self.assertEquals([], f(input, "c/a/b/d")) + self.assertEquals([], f(input, "i/r/t")) + + def test_flattens_lists(self): + f = utils.get_from_path + + input = [{'a': [1, 2, 3]}] + self.assertEquals([1, 2, 3], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}] + self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEquals([1, 2, 3], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}] + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}] + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = [{'a': [1, 2, {'b': 'b_1'}]}] + self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + + def test_bad_xpath(self): + f = utils.get_from_path + + self.assertRaises(exception.Error, f, [], None) + self.assertRaises(exception.Error, f, [], "") + self.assertRaises(exception.Error, f, [], "/") + self.assertRaises(exception.Error, f, [], "/a") + self.assertRaises(exception.Error, f, [], "/a/") + self.assertRaises(exception.Error, f, [], "//") + self.assertRaises(exception.Error, f, [], "//a") + self.assertRaises(exception.Error, f, [], "a//a") + self.assertRaises(exception.Error, f, [], "a//a/") + self.assertRaises(exception.Error, f, [], "a/a/") + + def test_real_failure1(self): + # Real world failure case... 
+ # We weren't coping when the input was a Dictionary instead of a List + # This led to test_accepts_dictionaries + f = utils.get_from_path + + inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}], + 'address': '192.168.0.3'}, + 'hostname': ''} + + private_ips = f(inst, 'fixed_ip/address') + public_ips = f(inst, 'fixed_ip/floating_ips/address') + self.assertEquals(['192.168.0.3'], private_ips) + self.assertEquals(['1.2.3.4'], public_ips) + + def test_accepts_dictionaries(self): + f = utils.get_from_path + + input = {'a': [1, 2, 3]} + self.assertEquals([1, 2, 3], f(input, "a")) + self.assertEquals([], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': {'b': [1, 2, 3]}} + self.assertEquals([{'b': [1, 2, 3]}], f(input, "a")) + self.assertEquals([1, 2, 3], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]} + self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b")) + self.assertEquals([], f(input, "a/b/c")) + + input = {'a': [1, 2, {'b': 'b_1'}]} + self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) + self.assertEquals(['b_1'], f(input, "a/b")) + + +class GenericUtilsTestCase(test.TestCase): + def test_parse_server_string(self): + result = utils.parse_server_string('::1') + self.assertEqual(('::1', ''), result) + result = utils.parse_server_string('[::1]:8773') + self.assertEqual(('::1', '8773'), result) + result = utils.parse_server_string('2001:db8::192.168.1.1') + self.assertEqual(('2001:db8::192.168.1.1', ''), result) + result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773') + self.assertEqual(('2001:db8::192.168.1.1', '8773'), result) + result = utils.parse_server_string('192.168.1.1') + self.assertEqual(('192.168.1.1', ''), result) + result = utils.parse_server_string('192.168.1.2:8773') + self.assertEqual(('192.168.1.2', '8773'), result) + result = utils.parse_server_string('192.168.1.3') + self.assertEqual(('192.168.1.3', ''), result) + result = utils.parse_server_string('www.example.com:8443') + self.assertEqual(('www.example.com', '8443'), result) + result = utils.parse_server_string('www.example.com') + self.assertEqual(('www.example.com', ''), result) + # error case + result = utils.parse_server_string('www.exa:mple.com:8443') + self.assertEqual(('', ''), result) + + def test_hostname_unicode_sanitization(self): + hostname = u"\u7684.test.example.com" + self.assertEqual("test.example.com", + utils.sanitize_hostname(hostname)) + + def test_hostname_sanitize_periods(self): + hostname = "....test.example.com..." 
+ self.assertEqual("test.example.com", + utils.sanitize_hostname(hostname)) + + def test_hostname_sanitize_dashes(self): + hostname = "----test.example.com---" + self.assertEqual("test.example.com", + utils.sanitize_hostname(hostname)) + + def test_hostname_sanitize_characters(self): + hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+" + self.assertEqual("91----test-host.example.com-0", + utils.sanitize_hostname(hostname)) + + def test_hostname_translate(self): + hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" + self.assertEqual("hello", utils.sanitize_hostname(hostname)) + + def test_bool_from_str(self): + self.assertTrue(utils.bool_from_str('1')) + self.assertTrue(utils.bool_from_str('2')) + self.assertTrue(utils.bool_from_str('-1')) + self.assertTrue(utils.bool_from_str('true')) + self.assertTrue(utils.bool_from_str('True')) + self.assertTrue(utils.bool_from_str('tRuE')) + self.assertFalse(utils.bool_from_str('False')) + self.assertFalse(utils.bool_from_str('false')) + self.assertFalse(utils.bool_from_str('0')) + self.assertFalse(utils.bool_from_str(None)) + self.assertFalse(utils.bool_from_str('junk')) + + def test_generate_glance_url(self): + generated_url = utils.generate_glance_url() + actual_url = "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port) + self.assertEqual(generated_url, actual_url) + + def test_read_cached_file(self): + self.mox.StubOutWithMock(os.path, "getmtime") + os.path.getmtime(mox.IgnoreArg()).AndReturn(1) + self.mox.ReplayAll() + + cache_data = {"data": 1123, "mtime": 1} + data = utils.read_cached_file("/this/is/a/fake", cache_data) + self.assertEqual(cache_data["data"], data) + + def test_read_modified_cached_file(self): + self.mox.StubOutWithMock(os.path, "getmtime") + self.mox.StubOutWithMock(__builtin__, 'open') + os.path.getmtime(mox.IgnoreArg()).AndReturn(2) + + fake_contents = "lorem ipsum" + fake_file = self.mox.CreateMockAnything() + fake_file.read().AndReturn(fake_contents) + fake_context_manager = self.mox.CreateMockAnything() + fake_context_manager.__enter__().AndReturn(fake_file) + fake_context_manager.__exit__(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + + __builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager) + + self.mox.ReplayAll() + cache_data = {"data": 1123, "mtime": 1} + self.reload_called = False + + def test_reload(reloaded_data): + self.assertEqual(reloaded_data, fake_contents) + self.reload_called = True + + data = utils.read_cached_file("/this/is/a/fake", cache_data, + reload_func=test_reload) + self.assertEqual(data, fake_contents) + self.assertTrue(self.reload_called) + + def test_generate_password(self): + password = utils.generate_password() + self.assertTrue([c for c in password if c in '0123456789']) + self.assertTrue([c for c in password + if c in 'abcdefghijklmnopqrstuvwxyz']) + self.assertTrue([c for c in password + if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']) + + def test_read_file_as_root(self): + def fake_execute(*args, **kwargs): + if args[1] == 'bad': + raise exception.ProcessExecutionError + return 'fakecontents', None + + self.stubs.Set(utils, 'execute', fake_execute) + contents = utils.read_file_as_root('good') + self.assertEqual(contents, 'fakecontents') + self.assertRaises(exception.FileNotFound, + utils.read_file_as_root, 'bad') + + def test_strcmp_const_time(self): + self.assertTrue(utils.strcmp_const_time('abc123', 'abc123')) + self.assertFalse(utils.strcmp_const_time('a', 'aaaaa')) + self.assertFalse(utils.strcmp_const_time('ABC123', 'abc123')) + + def test_temporary_chown(self): + 
def fake_execute(*args, **kwargs): + if args[0] == 'chown': + fake_execute.uid = args[1] + self.stubs.Set(utils, 'execute', fake_execute) + + with tempfile.NamedTemporaryFile() as f: + with utils.temporary_chown(f.name, owner_uid=2): + self.assertEqual(fake_execute.uid, 2) + self.assertEqual(fake_execute.uid, os.getuid()) + + +class IsUUIDLikeTestCase(test.TestCase): + def assertUUIDLike(self, val, expected): + result = utils.is_uuid_like(val) + self.assertEqual(result, expected) + + def test_good_uuid(self): + val = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + self.assertUUIDLike(val, True) + + def test_integer_passed(self): + val = 1 + self.assertUUIDLike(val, False) + + def test_non_uuid_string_passed(self): + val = 'foo-fooo' + self.assertUUIDLike(val, False) + + def test_non_uuid_string_passed2(self): + val = 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' + self.assertUUIDLike(val, False) + + def test_gen_valid_uuid(self): + self.assertUUIDLike(str(utils.gen_uuid()), True) + + +class ToPrimitiveTestCase(test.TestCase): + def test_list(self): + self.assertEquals(utils.to_primitive([1, 2, 3]), [1, 2, 3]) + + def test_empty_list(self): + self.assertEquals(utils.to_primitive([]), []) + + def test_tuple(self): + self.assertEquals(utils.to_primitive((1, 2, 3)), [1, 2, 3]) + + def test_dict(self): + self.assertEquals(utils.to_primitive(dict(a=1, b=2, c=3)), + dict(a=1, b=2, c=3)) + + def test_empty_dict(self): + self.assertEquals(utils.to_primitive({}), {}) + + def test_datetime(self): + x = datetime.datetime(1, 2, 3, 4, 5, 6, 7) + self.assertEquals(utils.to_primitive(x), "0001-02-03 04:05:06.000007") + + def test_iter(self): + class IterClass(object): + def __init__(self): + self.data = [1, 2, 3, 4, 5] + self.index = 0 + + def __iter__(self): + return self + + def next(self): + if self.index == len(self.data): + raise StopIteration + self.index = self.index + 1 + return self.data[self.index - 1] + + x = IterClass() + self.assertEquals(utils.to_primitive(x), [1, 2, 3, 4, 5]) + + def test_iteritems(self): + class IterItemsClass(object): + def __init__(self): + self.data = dict(a=1, b=2, c=3).items() + self.index = 0 + + def __iter__(self): + return self + + def next(self): + if self.index == len(self.data): + raise StopIteration + self.index = self.index + 1 + return self.data[self.index - 1] + + x = IterItemsClass() + ordered = utils.to_primitive(x) + ordered.sort() + self.assertEquals(ordered, [['a', 1], ['b', 2], ['c', 3]]) + + def test_instance(self): + class MysteryClass(object): + a = 10 + + def __init__(self): + self.b = 1 + + x = MysteryClass() + self.assertEquals(utils.to_primitive(x, convert_instances=True), + dict(b=1)) + + self.assertEquals(utils.to_primitive(x), x) + + def test_typeerror(self): + x = bytearray # Class, not instance + self.assertEquals(utils.to_primitive(x), u"<type 'bytearray'>") + + def test_nasties(self): + def foo(): + pass + x = [datetime, foo, dir] + ret = utils.to_primitive(x) + self.assertEquals(len(ret), 3) + self.assertTrue(ret[0].startswith(u"<module 'datetime' from ")) + self.assertTrue(ret[1].startswith(u"<function foo at 0x")) + self.assertEquals(ret[2], '<built-in function dir>') + + +class MonkeyPatchTestCase(test.TestCase): + """Unit test for utils.monkey_patch().""" + def setUp(self): + super(MonkeyPatchTestCase, self).setUp() + self.example_package = 'cinder.tests.monkey_patch_example.'
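+ # Each monkey_patch_modules entry takes the form + # 'module.to.patch:decorator.path'; the decorator named here wraps every + # public call in example_a so it records itself in CALLED_FUNCTION.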
+ self.flags( + monkey_patch=True, + monkey_patch_modules=[self.example_package + 'example_a' + ':' + + self.example_package + 'example_decorator']) + + def test_monkey_patch(self): + utils.monkey_patch() + cinder.tests.monkey_patch_example.CALLED_FUNCTION = [] + from cinder.tests.monkey_patch_example import example_a + from cinder.tests.monkey_patch_example import example_b + + self.assertEqual('Example function', example_a.example_function_a()) + exampleA = example_a.ExampleClassA() + exampleA.example_method() + ret_a = exampleA.example_method_add(3, 5) + self.assertEqual(ret_a, 8) + + self.assertEqual('Example function', example_b.example_function_b()) + exampleB = example_b.ExampleClassB() + exampleB.example_method() + ret_b = exampleB.example_method_add(3, 5) + + self.assertEqual(ret_b, 8) + package_a = self.example_package + 'example_a.' + self.assertTrue(package_a + 'example_function_a' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + + self.assertTrue(package_a + 'ExampleClassA.example_method' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertTrue(package_a + 'ExampleClassA.example_method_add' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + package_b = self.example_package + 'example_b.' + self.assertFalse(package_b + 'example_function_b' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertFalse(package_b + 'ExampleClassB.example_method' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + self.assertFalse(package_b + 'ExampleClassB.example_method_add' + in cinder.tests.monkey_patch_example.CALLED_FUNCTION) + + +class DeprecationTest(test.TestCase): + def setUp(self): + super(DeprecationTest, self).setUp() + + def fake_warn_deprecated_class(cls, msg): + self.warn = ('class', cls, msg) + + def fake_warn_deprecated_function(func, msg): + self.warn = ('function', func, msg) + + self.stubs.Set(utils, 'warn_deprecated_class', + fake_warn_deprecated_class) + self.stubs.Set(utils, 'warn_deprecated_function', + fake_warn_deprecated_function) + self.warn = None + + def test_deprecated_function_no_message(self): + def test_function(): + pass + + decorated = utils.deprecated()(test_function) + + decorated() + self.assertEqual(self.warn, ('function', test_function, '')) + + def test_deprecated_function_with_message(self): + def test_function(): + pass + + decorated = utils.deprecated('string')(test_function) + + decorated() + self.assertEqual(self.warn, ('function', test_function, 'string')) + + def test_deprecated_class_no_message(self): + @utils.deprecated() + class TestClass(object): + pass + + TestClass() + self.assertEqual(self.warn, ('class', TestClass, '')) + + def test_deprecated_class_with_message(self): + @utils.deprecated('string') + class TestClass(object): + pass + + TestClass() + self.assertEqual(self.warn, ('class', TestClass, 'string')) + + def test_deprecated_classmethod_no_message(self): + @utils.deprecated() + class TestClass(object): + @classmethod + def class_method(cls): + pass + + TestClass.class_method() + self.assertEqual(self.warn, ('class', TestClass, '')) + + def test_deprecated_classmethod_with_message(self): + @utils.deprecated('string') + class TestClass(object): + @classmethod + def class_method(cls): + pass + + TestClass.class_method() + self.assertEqual(self.warn, ('class', TestClass, 'string')) + + def test_deprecated_staticmethod_no_message(self): + @utils.deprecated() + class TestClass(object): + @staticmethod + def static_method(): + pass + + TestClass.static_method() + self.assertEqual(self.warn, 
('class', TestClass, '')) + + def test_deprecated_staticmethod_with_message(self): + @utils.deprecated('string') + class TestClass(object): + @staticmethod + def static_method(): + pass + + TestClass.static_method() + self.assertEqual(self.warn, ('class', TestClass, 'string')) + + def test_deprecated_instancemethod(self): + @utils.deprecated() + class TestClass(object): + def instance_method(self): + pass + + # Instantiate the class... + obj = TestClass() + self.assertEqual(self.warn, ('class', TestClass, '')) + + # Reset warn... + self.warn = None + + # Call the instance method... + obj.instance_method() + + # Make sure that did *not* generate a warning + self.assertEqual(self.warn, None) + + def test_service_is_up(self): + fts_func = datetime.datetime.fromtimestamp + fake_now = 1000 + down_time = 5 + + self.flags(service_down_time=down_time) + self.mox.StubOutWithMock(utils, 'utcnow') + + # Up (equal) + utils.utcnow().AndReturn(fts_func(fake_now)) + service = {'updated_at': fts_func(fake_now - down_time), + 'created_at': fts_func(fake_now - down_time)} + self.mox.ReplayAll() + result = utils.service_is_up(service) + self.assertTrue(result) + + self.mox.ResetAll() + # Up + utils.utcnow().AndReturn(fts_func(fake_now)) + service = {'updated_at': fts_func(fake_now - down_time + 1), + 'created_at': fts_func(fake_now - down_time + 1)} + self.mox.ReplayAll() + result = utils.service_is_up(service) + self.assertTrue(result) + + self.mox.ResetAll() + # Down + utils.utcnow().AndReturn(fts_func(fake_now)) + service = {'updated_at': fts_func(fake_now - down_time - 1), + 'created_at': fts_func(fake_now - down_time - 1)} + self.mox.ReplayAll() + result = utils.service_is_up(service) + self.assertFalse(result) + + def test_xhtml_escape(self): + self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"')) + self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'")) + + def test_hash_file(self): + data = 'Mary had a little lamb, its fleece as white as snow' + flo = StringIO.StringIO(data) + h1 = utils.hash_file(flo) + h2 = hashlib.sha1(data).hexdigest() + self.assertEquals(h1, h2) + + +class Iso8601TimeTest(test.TestCase): + + def _instaneous(self, timestamp, yr, mon, day, hr, min, sec, micro): + self.assertEquals(timestamp.year, yr) + self.assertEquals(timestamp.month, mon) + self.assertEquals(timestamp.day, day) + self.assertEquals(timestamp.hour, hr) + self.assertEquals(timestamp.minute, min) + self.assertEquals(timestamp.second, sec) + self.assertEquals(timestamp.microsecond, micro) + + def _do_test(self, str, yr, mon, day, hr, min, sec, micro, shift): + DAY_SECONDS = 24 * 60 * 60 + timestamp = utils.parse_isotime(str) + self._instaneous(timestamp, yr, mon, day, hr, min, sec, micro) + offset = timestamp.tzinfo.utcoffset(None) + self.assertEqual(offset.seconds + offset.days * DAY_SECONDS, shift) + + def test_zulu(self): + str = '2012-02-14T20:53:07Z' + self._do_test(str, 2012, 02, 14, 20, 53, 7, 0, 0) + + def test_zulu_micros(self): + str = '2012-02-14T20:53:07.123Z' + self._do_test(str, 2012, 02, 14, 20, 53, 7, 123000, 0) + + def test_offset_east(self): + str = '2012-02-14T20:53:07+04:30' + offset = 4.5 * 60 * 60 + self._do_test(str, 2012, 02, 14, 20, 53, 7, 0, offset) + + def test_offset_east_micros(self): + str = '2012-02-14T20:53:07.42+04:30' + offset = 4.5 * 60 * 60 + self._do_test(str, 2012, 02, 14, 20, 53, 7, 420000, offset) + + def test_offset_west(self): + str = '2012-02-14T20:53:07-05:30' + offset = -5.5 * 60 * 60 + self._do_test(str, 2012, 02, 14, 20, 53, 7, 0, offset) + + def
test_offset_west_micros(self): + str = '2012-02-14T20:53:07.654321-05:30' + offset = -5.5 * 60 * 60 + self._do_test(str, 2012, 02, 14, 20, 53, 7, 654321, offset) + + def test_compare(self): + zulu = utils.parse_isotime('2012-02-14T20:53:07') + east = utils.parse_isotime('2012-02-14T20:53:07-01:00') + west = utils.parse_isotime('2012-02-14T20:53:07+01:00') + self.assertTrue(east > west) + self.assertTrue(east > zulu) + self.assertTrue(zulu > west) + + def test_compare_micros(self): + zulu = utils.parse_isotime('2012-02-14T20:53:07.6544') + east = utils.parse_isotime('2012-02-14T19:53:07.654321-01:00') + west = utils.parse_isotime('2012-02-14T21:53:07.655+01:00') + self.assertTrue(east < west) + self.assertTrue(east < zulu) + self.assertTrue(zulu < west) + + def test_zulu_roundtrip(self): + str = '2012-02-14T20:53:07Z' + zulu = utils.parse_isotime(str) + self.assertEquals(zulu.tzinfo, iso8601.iso8601.UTC) + self.assertEquals(utils.isotime(zulu), str) + + def test_east_roundtrip(self): + str = '2012-02-14T20:53:07-07:00' + east = utils.parse_isotime(str) + self.assertEquals(east.tzinfo.tzname(None), '-07:00') + self.assertEquals(utils.isotime(east), str) + + def test_west_roundtrip(self): + str = '2012-02-14T20:53:07+11:30' + west = utils.parse_isotime(str) + self.assertEquals(west.tzinfo.tzname(None), '+11:30') + self.assertEquals(utils.isotime(west), str) + + def test_now_roundtrip(self): + str = utils.isotime() + now = utils.parse_isotime(str) + self.assertEquals(now.tzinfo, iso8601.iso8601.UTC) + self.assertEquals(utils.isotime(now), str) + + def test_zulu_normalize(self): + str = '2012-02-14T20:53:07Z' + zulu = utils.parse_isotime(str) + normed = utils.normalize_time(zulu) + self._instaneous(normed, 2012, 2, 14, 20, 53, 07, 0) + + def test_east_normalize(self): + str = '2012-02-14T20:53:07-07:00' + east = utils.parse_isotime(str) + normed = utils.normalize_time(east) + self._instaneous(normed, 2012, 2, 15, 03, 53, 07, 0) + + def test_west_normalize(self): + str = '2012-02-14T20:53:07+21:00' + west = utils.parse_isotime(str) + normed = utils.normalize_time(west) + self._instaneous(normed, 2012, 2, 13, 23, 53, 07, 0) + + +class TestGreenLocks(test.TestCase): + def test_concurrent_green_lock_succeeds(self): + """Verify spawn_n greenthreads with two locks run concurrently. + + This succeeds with spawn but fails with spawn_n because lockfile + gets the same thread id for both spawn_n threads. Our workaround + of using the GreenLockFile will work even if the issue is fixed. 
+ """ + self.completed = False + with utils.tempdir() as tmpdir: + + def locka(wait): + a = utils.GreenLockFile(os.path.join(tmpdir, 'a')) + a.acquire() + wait.wait() + a.release() + self.completed = True + + def lockb(wait): + b = utils.GreenLockFile(os.path.join(tmpdir, 'b')) + b.acquire() + wait.wait() + b.release() + + wait1 = eventlet.event.Event() + wait2 = eventlet.event.Event() + pool = greenpool.GreenPool() + pool.spawn_n(locka, wait1) + pool.spawn_n(lockb, wait2) + wait2.send() + eventlet.sleep(0) + wait1.send() + pool.waitall() + self.assertTrue(self.completed) + + +class TestLockCleanup(test.TestCase): + """unit tests for utils.cleanup_file_locks()""" + + def setUp(self): + super(TestLockCleanup, self).setUp() + + self.pid = os.getpid() + self.dead_pid = self._get_dead_pid() + self.tempdir = tempfile.mkdtemp() + self.flags(lock_path=self.tempdir) + self.lock_name = 'cinder-testlock' + self.lock_file = os.path.join(FLAGS.lock_path, + self.lock_name + '.lock') + self.hostname = socket.gethostname() + print self.pid, self.dead_pid + try: + os.unlink(self.lock_file) + except OSError as (errno, strerror): + if errno == 2: + pass + + def tearDown(self): + shutil.rmtree(self.tempdir) + super(TestLockCleanup, self).tearDown() + + def _get_dead_pid(self): + """get a pid for a process that does not exist""" + + candidate_pid = self.pid - 1 + while os.path.exists(os.path.join('/proc', str(candidate_pid))): + candidate_pid -= 1 + if candidate_pid == 1: + return 0 + return candidate_pid + + def _get_sentinel_name(self, hostname, pid, thread='MainThread'): + return os.path.join(FLAGS.lock_path, + '%s.%s-%d' % (hostname, thread, pid)) + + def _create_sentinel(self, hostname, pid, thread='MainThread'): + name = self._get_sentinel_name(hostname, pid, thread) + open(name, 'wb').close() + return name + + def test_clean_stale_locks(self): + """verify locks for dead processes are cleaned up""" + + # create sentinels for two processes, us and a 'dead' one + # no active lock + sentinel1 = self._create_sentinel(self.hostname, self.pid) + sentinel2 = self._create_sentinel(self.hostname, self.dead_pid) + + utils.cleanup_file_locks() + + self.assertTrue(os.path.exists(sentinel1)) + self.assertFalse(os.path.exists(self.lock_file)) + self.assertFalse(os.path.exists(sentinel2)) + + os.unlink(sentinel1) + + def test_clean_stale_locks_active(self): + """verify locks for dead processes are cleaned with an active lock """ + + # create sentinels for two processes, us and a 'dead' one + # create an active lock for us + sentinel1 = self._create_sentinel(self.hostname, self.pid) + sentinel2 = self._create_sentinel(self.hostname, self.dead_pid) + os.link(sentinel1, self.lock_file) + + utils.cleanup_file_locks() + + self.assertTrue(os.path.exists(sentinel1)) + self.assertTrue(os.path.exists(self.lock_file)) + self.assertFalse(os.path.exists(sentinel2)) + + os.unlink(sentinel1) + os.unlink(self.lock_file) + + def test_clean_stale_with_threads(self): + """verify locks for multiple threads are cleaned up """ + + # create sentinels for four threads in our process, and a 'dead' + # process. no lock. 
+ sentinel1 = self._create_sentinel(self.hostname, self.pid, 'Default-1') + sentinel2 = self._create_sentinel(self.hostname, self.pid, 'Default-2') + sentinel3 = self._create_sentinel(self.hostname, self.pid, 'Default-3') + sentinel4 = self._create_sentinel(self.hostname, self.pid, 'Default-4') + sentinel5 = self._create_sentinel(self.hostname, self.dead_pid, + 'Default-1') + + utils.cleanup_file_locks() + + self.assertTrue(os.path.exists(sentinel1)) + self.assertTrue(os.path.exists(sentinel2)) + self.assertTrue(os.path.exists(sentinel3)) + self.assertTrue(os.path.exists(sentinel4)) + self.assertFalse(os.path.exists(self.lock_file)) + self.assertFalse(os.path.exists(sentinel5)) + + os.unlink(sentinel1) + os.unlink(sentinel2) + os.unlink(sentinel3) + os.unlink(sentinel4) + + def test_clean_stale_with_threads_active(self): + """verify locks for multiple threads are cleaned up """ + + # create sentinels for four threads in our process, and a 'dead' + # process + sentinel1 = self._create_sentinel(self.hostname, self.pid, 'Default-1') + sentinel2 = self._create_sentinel(self.hostname, self.pid, 'Default-2') + sentinel3 = self._create_sentinel(self.hostname, self.pid, 'Default-3') + sentinel4 = self._create_sentinel(self.hostname, self.pid, 'Default-4') + sentinel5 = self._create_sentinel(self.hostname, self.dead_pid, + 'Default-1') + + os.link(sentinel1, self.lock_file) + + utils.cleanup_file_locks() + + self.assertTrue(os.path.exists(sentinel1)) + self.assertTrue(os.path.exists(sentinel2)) + self.assertTrue(os.path.exists(sentinel3)) + self.assertTrue(os.path.exists(sentinel4)) + self.assertTrue(os.path.exists(self.lock_file)) + self.assertFalse(os.path.exists(sentinel5)) + + os.unlink(sentinel1) + os.unlink(sentinel2) + os.unlink(sentinel3) + os.unlink(sentinel4) + os.unlink(self.lock_file) + + def test_clean_bogus_lockfiles(self): + """verify lockfiles are cleaned """ + + lock1 = os.path.join(FLAGS.lock_path, 'cinder-testlock1.lock') + lock2 = os.path.join(FLAGS.lock_path, 'cinder-testlock2.lock') + lock3 = os.path.join(FLAGS.lock_path, 'testlock3.lock') + + open(lock1, 'wb').close() + open(lock2, 'wb').close() + open(lock3, 'wb').close() + + utils.cleanup_file_locks() + + self.assertFalse(os.path.exists(lock1)) + self.assertFalse(os.path.exists(lock2)) + self.assertTrue(os.path.exists(lock3)) + + os.unlink(lock3) + + +class AuditPeriodTest(test.TestCase): + + def setUp(self): + super(AuditPeriodTest, self).setUp() + #a fairly random time to test with + self.test_time = datetime.datetime(second=23, + minute=12, + hour=8, + day=5, + month=3, + year=2012) + utils.set_time_override(override_time=self.test_time) + + def tearDown(self): + utils.clear_time_override() + super(AuditPeriodTest, self).tearDown() + + def test_hour(self): + begin, end = utils.last_completed_audit_period(unit='hour') + self.assertEquals(begin, datetime.datetime( + hour=7, + day=5, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime( + hour=8, + day=5, + month=3, + year=2012)) + + def test_hour_with_offset_before_current(self): + begin, end = utils.last_completed_audit_period(unit='hour@10') + self.assertEquals(begin, datetime.datetime( + minute=10, + hour=7, + day=5, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime( + minute=10, + hour=8, + day=5, + month=3, + year=2012)) + + def test_hour_with_offset_after_current(self): + begin, end = utils.last_completed_audit_period(unit='hour@30') + self.assertEquals(begin, datetime.datetime( + minute=30, + hour=6, + day=5, + month=3, + year=2012)) 
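+ # The overridden 'now' is 08:12:23; minute 12 is before the @30 offset, + # so the last completed hour-long period ends at 07:30 rather than 08:30.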
+ self.assertEquals(end, datetime.datetime( + minute=30, + hour=7, + day=5, + month=3, + year=2012)) + + def test_day(self): + begin, end = utils.last_completed_audit_period(unit='day') + self.assertEquals(begin, datetime.datetime( + day=4, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime( + day=5, + month=3, + year=2012)) + + def test_day_with_offset_before_current(self): + begin, end = utils.last_completed_audit_period(unit='day@6') + self.assertEquals(begin, datetime.datetime( + hour=6, + day=4, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime( + hour=6, + day=5, + month=3, + year=2012)) + + def test_day_with_offset_after_current(self): + begin, end = utils.last_completed_audit_period(unit='day@10') + self.assertEquals(begin, datetime.datetime( + hour=10, + day=3, + month=3, + year=2012)) + self.assertEquals(end, datetime.datetime( + hour=10, + day=4, + month=3, + year=2012)) + + def test_month(self): + begin, end = utils.last_completed_audit_period(unit='month') + self.assertEquals(begin, datetime.datetime( + day=1, + month=2, + year=2012)) + self.assertEquals(end, datetime.datetime( + day=1, + month=3, + year=2012)) + + def test_month_with_offset_before_current(self): + begin, end = utils.last_completed_audit_period(unit='month@2') + self.assertEquals(begin, datetime.datetime( + day=2, + month=2, + year=2012)) + self.assertEquals(end, datetime.datetime( + day=2, + month=3, + year=2012)) + + def test_month_with_offset_after_current(self): + begin, end = utils.last_completed_audit_period(unit='month@15') + self.assertEquals(begin, datetime.datetime( + day=15, + month=1, + year=2012)) + self.assertEquals(end, datetime.datetime( + day=15, + month=2, + year=2012)) + + def test_year(self): + begin, end = utils.last_completed_audit_period(unit='year') + self.assertEquals(begin, datetime.datetime( + day=1, + month=1, + year=2011)) + self.assertEquals(end, datetime.datetime( + day=1, + month=1, + year=2012)) + + def test_year_with_offset_before_current(self): + begin, end = utils.last_completed_audit_period(unit='year@2') + self.assertEquals(begin, datetime.datetime( + day=1, + month=2, + year=2011)) + self.assertEquals(end, datetime.datetime( + day=1, + month=2, + year=2012)) + + def test_year_with_offset_after_current(self): + begin, end = utils.last_completed_audit_period(unit='year@6') + self.assertEquals(begin, datetime.datetime( + day=1, + month=6, + year=2010)) + self.assertEquals(end, datetime.datetime( + day=1, + month=6, + year=2011)) diff --git a/cinder/tests/test_versions.py b/cinder/tests/test_versions.py new file mode 100644 index 00000000000..c7a88c5a3e6 --- /dev/null +++ b/cinder/tests/test_versions.py @@ -0,0 +1,59 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Ken Pepple +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
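+ +# The assertions below pin down the version-string contract: version_string() +# carries a '-dev' suffix until FINAL is True, canonical_version_string() is +# always 'YEAR.COUNT', and the vcs variants append 'branch_nick:revision_id' +# taken from version_info.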
+ + +from cinder import test +from cinder import version + + +class VersionTestCase(test.TestCase): + """Test cases for Versions code""" + def setUp(self): + """setup test with unchanging values""" + super(VersionTestCase, self).setUp() + self.version = version + self.version.FINAL = False + self.version.CINDER_VERSION = ['2012', '10'] + self.version.YEAR, self.version.COUNT = self.version.CINDER_VERSION + self.version.version_info = {'branch_nick': u'LOCALBRANCH', + 'revision_id': 'LOCALREVISION', + 'revno': 0} + + def test_version_string_is_good(self): + """Ensure version string works""" + self.assertEqual("2012.10-dev", self.version.version_string()) + + def test_canonical_version_string_is_good(self): + """Ensure canonical version works""" + self.assertEqual("2012.10", self.version.canonical_version_string()) + + def test_final_version_strings_are_identical(self): + """Ensure final version strings match only at release""" + self.assertNotEqual(self.version.canonical_version_string(), + self.version.version_string()) + self.version.FINAL = True + self.assertEqual(self.version.canonical_version_string(), + self.version.version_string()) + + def test_vcs_version_string_is_good(self): + """Ensure uninstalled code generates local """ + self.assertEqual("LOCALBRANCH:LOCALREVISION", + self.version.vcs_version_string()) + + def test_version_string_with_vcs_is_good(self): + """Ensure uninstalled code get version string""" + self.assertEqual("2012.10-LOCALBRANCH:LOCALREVISION", + self.version.version_string_with_vcs()) diff --git a/cinder/tests/test_volume.py b/cinder/tests/test_volume.py new file mode 100644 index 00000000000..aebc259425b --- /dev/null +++ b/cinder/tests/test_volume.py @@ -0,0 +1,501 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Volume Code. 
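+ +These cases drive the volume manager directly against the fake driver +configured in setUp(): volume create/delete, snapshot lifecycles, +attach/detach state transitions and iSCSI target allocation.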
+ +""" + +import cStringIO + +import mox + +from cinder import context +from cinder import exception +from cinder import db +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import importutils +import cinder.policy +from cinder import rpc +from cinder import test +import cinder.volume.api + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class VolumeTestCase(test.TestCase): + """Test Case for volumes.""" + + def setUp(self): + super(VolumeTestCase, self).setUp() + self.flags(connection_type='fake') + self.volume = importutils.import_object(FLAGS.volume_manager) + self.context = context.get_admin_context() + + def tearDown(self): + super(VolumeTestCase, self).tearDown() + + @staticmethod + def _create_volume(size='0', snapshot_id=None): + """Create a volume object.""" + vol = {} + vol['size'] = size + vol['snapshot_id'] = snapshot_id + vol['user_id'] = 'fake' + vol['project_id'] = 'fake' + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + return db.volume_create(context.get_admin_context(), vol) + + def test_create_delete_volume(self): + """Test volume can be created and deleted.""" + volume = self._create_volume() + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), + volume_id).id) + + self.volume.delete_volume(self.context, volume_id) + self.assertRaises(exception.NotFound, + db.volume_get, + self.context, + volume_id) + + def test_delete_busy_volume(self): + """Test volume survives deletion if driver reports it as busy.""" + volume = self._create_volume() + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + + self.mox.StubOutWithMock(self.volume.driver, 'delete_volume') + self.volume.driver.delete_volume(mox.IgnoreArg()) \ + .AndRaise(exception.VolumeIsBusy) + self.mox.ReplayAll() + res = self.volume.delete_volume(self.context, volume_id) + self.assertEqual(True, res) + volume_ref = db.volume_get(context.get_admin_context(), volume_id) + self.assertEqual(volume_id, volume_ref.id) + self.assertEqual("available", volume_ref.status) + + self.mox.UnsetStubs() + self.volume.delete_volume(self.context, volume_id) + + def test_create_volume_from_snapshot(self): + """Test volume can be created from a snapshot.""" + volume_src = self._create_volume() + self.volume.create_volume(self.context, volume_src['id']) + snapshot_id = self._create_snapshot(volume_src['id']) + self.volume.create_snapshot(self.context, volume_src['id'], + snapshot_id) + volume_dst = self._create_volume(0, snapshot_id) + self.volume.create_volume(self.context, volume_dst['id'], snapshot_id) + self.assertEqual(volume_dst['id'], + db.volume_get( + context.get_admin_context(), + volume_dst['id']).id) + self.assertEqual(snapshot_id, db.volume_get( + context.get_admin_context(), + volume_dst['id']).snapshot_id) + + self.volume.delete_volume(self.context, volume_dst['id']) + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_src['id']) + + def test_too_big_volume(self): + """Ensure failure if a too large of a volume is requested.""" + # FIXME(vish): validation needs to move into the data layer in + # volume_create + return True + try: + volume = self._create_volume('1001') + self.volume.create_volume(self.context, volume) + self.fail("Should have thrown TypeError") + except TypeError: + pass + + def 
test_too_many_volumes(self): + """Ensure that NoMoreTargets is raised when we run out of volumes.""" + vols = [] + total_slots = FLAGS.iscsi_num_targets + for _index in xrange(total_slots): + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + vols.append(volume['id']) + volume = self._create_volume() + self.assertRaises(db.NoMoreTargets, + self.volume.create_volume, + self.context, + volume['id']) + db.volume_destroy(context.get_admin_context(), volume['id']) + for volume_id in vols: + self.volume.delete_volume(self.context, volume_id) + + def test_run_attach_detach_volume(self): + """Make sure volume can be attached and detached from instance.""" + instance_id = 'fake-inst' + mountpoint = "/dev/sdf" + volume = self._create_volume() + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + if FLAGS.fake_tests: + db.volume_attached(self.context, volume_id, instance_id, + mountpoint) + else: + self.compute.attach_volume(self.context, + instance_id, + volume_id, + mountpoint) + vol = db.volume_get(context.get_admin_context(), volume_id) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + self.assertEqual(vol['mountpoint'], mountpoint) + self.assertEqual(vol['instance_id'], instance_id) + + self.assertRaises(exception.Error, + self.volume.delete_volume, + self.context, + volume_id) + if FLAGS.fake_tests: + db.volume_detached(self.context, volume_id) + else: + pass + self.compute.detach_volume(self.context, + instance_id, + volume_id) + vol = db.volume_get(self.context, volume_id) + self.assertEqual(vol['status'], "available") + + self.volume.delete_volume(self.context, volume_id) + self.assertRaises(exception.VolumeNotFound, + db.volume_get, + self.context, + volume_id) + + def test_concurrent_volumes_get_different_targets(self): + """Ensure multiple concurrent volumes get different targets.""" + volume_ids = [] + targets = [] + + def _check(volume_id): + """Make sure targets aren't duplicated.""" + volume_ids.append(volume_id) + admin_context = context.get_admin_context() + iscsi_target = db.volume_get_iscsi_target_num(admin_context, + volume_id) + self.assert_(iscsi_target not in targets) + targets.append(iscsi_target) + LOG.debug(_("Target %s allocated"), iscsi_target) + total_slots = FLAGS.iscsi_num_targets + for _index in xrange(total_slots): + volume = self._create_volume() + d = self.volume.create_volume(self.context, volume['id']) + _check(d) + for volume_id in volume_ids: + self.volume.delete_volume(self.context, volume_id) + + def test_multi_node(self): + # TODO(termie): Figure out how to test with two nodes, + # each of them having a different FLAG for storage_node + # This will allow us to test cross-node interactions + pass + + @staticmethod + def _create_snapshot(volume_id, size='0'): + """Create a snapshot object.""" + snap = {} + snap['volume_size'] = size + snap['user_id'] = 'fake' + snap['project_id'] = 'fake' + snap['volume_id'] = volume_id + snap['status'] = "creating" + return db.snapshot_create(context.get_admin_context(), snap)['id'] + + def test_create_delete_snapshot(self): + """Test snapshot can be created and deleted.""" + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + snapshot_id = self._create_snapshot(volume['id']) + self.volume.create_snapshot(self.context, volume['id'], snapshot_id) + self.assertEqual(snapshot_id, + db.snapshot_get(context.get_admin_context(), + snapshot_id).id) + + 
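+ # Deleting the snapshot must make later lookups raise NotFound + # (asserted below) before the parent volume is cleaned up.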
self.volume.delete_snapshot(self.context, snapshot_id) + self.assertRaises(exception.NotFound, + db.snapshot_get, + self.context, + snapshot_id) + self.volume.delete_volume(self.context, volume['id']) + + def test_cant_delete_volume_with_snapshots(self): + """Test snapshot can be created and deleted.""" + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + snapshot_id = self._create_snapshot(volume['id']) + self.volume.create_snapshot(self.context, volume['id'], snapshot_id) + self.assertEqual(snapshot_id, + db.snapshot_get(context.get_admin_context(), + snapshot_id).id) + + volume['status'] = 'available' + volume['host'] = 'fakehost' + + volume_api = cinder.volume.api.API() + + self.assertRaises(exception.InvalidVolume, + volume_api.delete, + self.context, + volume) + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume['id']) + + def test_can_delete_errored_snapshot(self): + """Test snapshot can be created and deleted.""" + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + snapshot_id = self._create_snapshot(volume['id']) + self.volume.create_snapshot(self.context, volume['id'], snapshot_id) + snapshot = db.snapshot_get(context.get_admin_context(), + snapshot_id) + + volume_api = cinder.volume.api.API() + + snapshot['status'] = 'badstatus' + self.assertRaises(exception.InvalidVolume, + volume_api.delete_snapshot, + self.context, + snapshot) + + snapshot['status'] = 'error' + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume['id']) + + def test_create_snapshot_force(self): + """Test snapshot in use can be created forcibly.""" + + def fake_cast(ctxt, topic, msg): + pass + self.stubs.Set(rpc, 'cast', fake_cast) + instance_id = 'fake-inst' + + volume = self._create_volume() + self.volume.create_volume(self.context, volume['id']) + db.volume_attached(self.context, volume['id'], instance_id, + '/dev/sda1') + + volume_api = cinder.volume.api.API() + volume = volume_api.get(self.context, volume['id']) + self.assertRaises(exception.InvalidVolume, + volume_api.create_snapshot, + self.context, volume, + 'fake_name', 'fake_description') + snapshot_ref = volume_api.create_snapshot_force(self.context, + volume, + 'fake_name', + 'fake_description') + db.snapshot_destroy(self.context, snapshot_ref['id']) + db.volume_destroy(self.context, volume['id']) + + def test_delete_busy_snapshot(self): + """Test snapshot can be created and deleted.""" + volume = self._create_volume() + volume_id = volume['id'] + self.volume.create_volume(self.context, volume_id) + snapshot_id = self._create_snapshot(volume_id) + self.volume.create_snapshot(self.context, volume_id, snapshot_id) + + self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot') + self.volume.driver.delete_snapshot(mox.IgnoreArg()) \ + .AndRaise(exception.SnapshotIsBusy) + self.mox.ReplayAll() + self.volume.delete_snapshot(self.context, snapshot_id) + snapshot_ref = db.snapshot_get(self.context, snapshot_id) + self.assertEqual(snapshot_id, snapshot_ref.id) + self.assertEqual("available", snapshot_ref.status) + + self.mox.UnsetStubs() + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_id) + + +class DriverTestCase(test.TestCase): + """Base Test class for Drivers.""" + driver_name = "cinder.volume.driver.FakeBaseDriver" + + def setUp(self): + super(DriverTestCase, self).setUp() + 
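+ # Subclasses override driver_name; wiring it into the volume_driver + # flag here lets this single base class exercise several driver + # implementations.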
self.flags(volume_driver=self.driver_name, + logging_default_format_string="%(message)s") + self.volume = importutils.import_object(FLAGS.volume_manager) + self.context = context.get_admin_context() + self.output = "" + + def _fake_execute(_command, *_args, **_kwargs): + """Fake _execute.""" + return self.output, None + self.volume.driver.set_execute(_fake_execute) + + log = logging.getLogger() + self.stream = cStringIO.StringIO() + log.logger.addHandler(logging.logging.StreamHandler(self.stream)) + + def _attach_volume(self): + """Attach volumes to an instance. This function also sets + a fake log message.""" + return [] + + def _detach_volume(self, volume_id_list): + """Detach volumes from an instance.""" + for volume_id in volume_id_list: + db.volume_detached(self.context, volume_id) + self.volume.delete_volume(self.context, volume_id) + + +class VolumeDriverTestCase(DriverTestCase): + """Test case for VolumeDriver""" + driver_name = "cinder.volume.driver.VolumeDriver" + + def test_delete_busy_volume(self): + """Test deleting a busy volume.""" + self.stubs.Set(self.volume.driver, '_volume_not_present', + lambda x: False) + self.stubs.Set(self.volume.driver, '_delete_volume', + lambda x, y: False) + # Want DriverTestCase._fake_execute to return 'o' so that + # volume.driver.delete_volume() raises the VolumeIsBusy exception. + self.output = 'o' + self.assertRaises(exception.VolumeIsBusy, + self.volume.driver.delete_volume, + {'name': 'test1', 'size': 1024}) + # when DriverTestCase._fake_execute returns something other than + # 'o' volume.driver.delete_volume() does not raise an exception. + self.output = 'x' + self.volume.driver.delete_volume({'name': 'test1', 'size': 1024}) + + +class ISCSITestCase(DriverTestCase): + """Test Case for ISCSIDriver""" + driver_name = "cinder.volume.driver.ISCSIDriver" + + def _attach_volume(self): + """Attach volumes to an instance. 
This function also sets + a fake log message.""" + volume_id_list = [] + for index in xrange(3): + vol = {} + vol['size'] = 0 + vol_ref = db.volume_create(self.context, vol) + self.volume.create_volume(self.context, vol_ref['id']) + vol_ref = db.volume_get(self.context, vol_ref['id']) + + # each volume has a different mountpoint + mountpoint = "/dev/sd" + chr((ord('b') + index)) + instance_id = 'fake-inst' + db.volume_attached(self.context, vol_ref['id'], instance_id, + mountpoint) + volume_id_list.append(vol_ref['id']) + + return volume_id_list + + def test_check_for_export_with_no_volume(self): + """No log message when no volume is attached to an instance.""" + self.stream.truncate(0) + instance_id = 'fake-inst' + self.volume.check_for_export(self.context, instance_id) + self.assertEqual(self.stream.getvalue(), '') + + def test_check_for_export_with_all_volume_exported(self): + """No log message when all the processes are running.""" + volume_id_list = self._attach_volume() + + self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target') + for i in volume_id_list: + tid = db.volume_get_iscsi_target_num(self.context, i) + self.volume.driver.tgtadm.show_target(tid) + + self.stream.truncate(0) + self.mox.ReplayAll() + instance_id = 'fake-inst' + self.volume.check_for_export(self.context, instance_id) + self.assertEqual(self.stream.getvalue(), '') + self.mox.UnsetStubs() + + self._detach_volume(volume_id_list) + + def test_check_for_export_with_some_volume_missing(self): + """Output a warning message when some volumes are not recognized + by ietd.""" + volume_id_list = self._attach_volume() + instance_id = 'fake-inst' + + tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0]) + self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target') + self.volume.driver.tgtadm.show_target(tid).AndRaise( + exception.ProcessExecutionError()) + + self.mox.ReplayAll() + self.assertRaises(exception.ProcessExecutionError, + self.volume.check_for_export, + self.context, + instance_id) + msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0] + self.assertTrue(0 <= self.stream.getvalue().find(msg)) + self.mox.UnsetStubs() + + self._detach_volume(volume_id_list) + + +class VolumePolicyTestCase(test.TestCase): + + def setUp(self): + super(VolumePolicyTestCase, self).setUp() + + cinder.policy.reset() + cinder.policy.init() + + self.context = context.get_admin_context() + + def tearDown(self): + super(VolumePolicyTestCase, self).tearDown() + cinder.policy.reset() + + def _set_rules(self, rules): + cinder.common.policy.set_brain(cinder.common.policy.HttpBrain(rules)) + + def test_check_policy(self): + self.mox.StubOutWithMock(cinder.policy, 'enforce') + target = { + 'project_id': self.context.project_id, + 'user_id': self.context.user_id, + } + cinder.policy.enforce(self.context, 'volume:attach', target) + self.mox.ReplayAll() + cinder.volume.api.check_policy(self.context, 'attach') + + def test_check_policy_with_target(self): + self.mox.StubOutWithMock(cinder.policy, 'enforce') + target = { + 'project_id': self.context.project_id, + 'user_id': self.context.user_id, + 'id': 2, + } + cinder.policy.enforce(self.context, 'volume:attach', target) + self.mox.ReplayAll() + cinder.volume.api.check_policy(self.context, 'attach', {'id': 2}) diff --git a/cinder/tests/test_volume_types.py b/cinder/tests/test_volume_types.py new file mode 100644 index 00000000000..494bd0c9964 --- /dev/null +++ b/cinder/tests/test_volume_types.py @@ -0,0 +1,167 @@ +# vim: tabstop=4 shiftwidth=4 
softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for volume types code +""" +import time + +from cinder import context +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import test +from cinder.volume import volume_types +from cinder.db.sqlalchemy import session as sql_session +from cinder.db.sqlalchemy import models + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class VolumeTypeTestCase(test.TestCase): + """Test cases for volume type code""" + def setUp(self): + super(VolumeTypeTestCase, self).setUp() + + self.ctxt = context.get_admin_context() + self.vol_type1_name = str(int(time.time())) + self.vol_type1_specs = dict( + type="physical drive", + drive_type="SAS", + size="300", + rpm="7200", + visible="True") + + def test_volume_type_create_then_destroy(self): + """Ensure volume types can be created and deleted""" + prev_all_vtypes = volume_types.get_all_types(self.ctxt) + + volume_types.create(self.ctxt, + self.vol_type1_name, + self.vol_type1_specs) + new = volume_types.get_volume_type_by_name(self.ctxt, + self.vol_type1_name) + + LOG.info(_("Given data: %s"), self.vol_type1_specs) + LOG.info(_("Result data: %s"), new) + + for k, v in self.vol_type1_specs.iteritems(): + self.assertEqual(v, new['extra_specs'][k], + 'one of the fields does not match') + + new_all_vtypes = volume_types.get_all_types(self.ctxt) + self.assertEqual(len(prev_all_vtypes) + 1, + len(new_all_vtypes), + 'volume type was not created') + + volume_types.destroy(self.ctxt, self.vol_type1_name) + new_all_vtypes = volume_types.get_all_types(self.ctxt) + self.assertEqual(prev_all_vtypes, + new_all_vtypes, + 'volume type was not deleted') + + def test_get_all_volume_types(self): + """Ensures that all volume types can be retrieved""" + session = sql_session.get_session() + total_volume_types = session.query(models.VolumeTypes).count() + vol_types = volume_types.get_all_types(self.ctxt) + self.assertEqual(total_volume_types, len(vol_types)) + + def test_non_existant_vol_type_shouldnt_delete(self): + """Ensures that destroying a nonexistent volume type raises""" + self.assertRaises(exception.VolumeTypeNotFoundByName, + volume_types.destroy, self.ctxt, "sfsfsdfdfs") + + def test_repeated_vol_types_shouldnt_raise(self): + """Ensures that recreating a volume type under a reused name + doesn't raise""" + new_name = self.vol_type1_name + "dup" + volume_types.create(self.ctxt, new_name) + volume_types.destroy(self.ctxt, new_name) + volume_types.create(self.ctxt, new_name) + + def test_invalid_volume_types_params(self): + """Ensures the volume type APIs reject None arguments""" + self.assertRaises(exception.InvalidVolumeType, + volume_types.destroy, self.ctxt, None) + self.assertRaises(exception.InvalidVolumeType, + volume_types.get_volume_type, self.ctxt, None) + self.assertRaises(exception.InvalidVolumeType, + volume_types.get_volume_type_by_name, + self.ctxt, None) + + def 
test_volume_type_get_by_id_and_name(self): + """Ensure volume types get returns same entry""" + volume_types.create(self.ctxt, + self.vol_type1_name, + self.vol_type1_specs) + new = volume_types.get_volume_type_by_name(self.ctxt, + self.vol_type1_name) + + new2 = volume_types.get_volume_type(self.ctxt, new['id']) + self.assertEqual(new, new2) + + def test_volume_type_search_by_extra_spec(self): + """Ensure volume types get by extra spec returns correct type""" + volume_types.create(self.ctxt, "type1", {"key1": "val1", + "key2": "val2"}) + volume_types.create(self.ctxt, "type2", {"key2": "val2", + "key3": "val3"}) + volume_types.create(self.ctxt, "type3", {"key3": "another_value", + "key4": "val4"}) + + vol_types = volume_types.get_all_types(self.ctxt, + search_opts={'extra_specs': {"key1": "val1"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 1) + self.assertTrue("type1" in vol_types.keys()) + self.assertEqual(vol_types['type1']['extra_specs'], + {"key1": "val1", "key2": "val2"}) + + vol_types = volume_types.get_all_types(self.ctxt, + search_opts={'extra_specs': {"key2": "val2"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 2) + self.assertTrue("type1" in vol_types.keys()) + self.assertTrue("type2" in vol_types.keys()) + + vol_types = volume_types.get_all_types(self.ctxt, + search_opts={'extra_specs': {"key3": "val3"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 1) + self.assertTrue("type2" in vol_types.keys()) + + def test_volume_type_search_by_extra_spec_multiple(self): + """Ensure volume types get by extra spec returns correct type""" + volume_types.create(self.ctxt, "type1", {"key1": "val1", + "key2": "val2", + "key3": "val3"}) + volume_types.create(self.ctxt, "type2", {"key2": "val2", + "key3": "val3"}) + volume_types.create(self.ctxt, "type3", {"key1": "val1", + "key3": "val3", + "key4": "val4"}) + + vol_types = volume_types.get_all_types(self.ctxt, + search_opts={'extra_specs': {"key1": "val1", + "key3": "val3"}}) + LOG.info("vol_types: %s" % vol_types) + self.assertEqual(len(vol_types), 2) + self.assertTrue("type1" in vol_types.keys()) + self.assertTrue("type3" in vol_types.keys()) + self.assertEqual(vol_types['type1']['extra_specs'], + {"key1": "val1", "key2": "val2", "key3": "val3"}) + self.assertEqual(vol_types['type3']['extra_specs'], + {"key1": "val1", "key3": "val3", "key4": "val4"}) diff --git a/cinder/tests/test_volume_types_extra_specs.py b/cinder/tests/test_volume_types_extra_specs.py new file mode 100644 index 00000000000..e7241086fbb --- /dev/null +++ b/cinder/tests/test_volume_types_extra_specs.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Copyright 2011 University of Southern California +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
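+# NOTE: the cases below exercise the db-layer extra-specs helpers +# (volume_type_extra_specs_get / _update_or_create / _delete). Extra-spec +# values are stored as strings, so the integer vol_extra3=3 set up below +# is expected to read back as "3".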
+""" +Unit Tests for volume types extra specs code +""" + +from cinder import context +from cinder import db +from cinder import test + + +class VolumeTypeExtraSpecsTestCase(test.TestCase): + + def setUp(self): + super(VolumeTypeExtraSpecsTestCase, self).setUp() + self.context = context.get_admin_context() + self.vol_type1 = dict(name="TEST: Regular volume test") + self.vol_type1_specs = dict(vol_extra1="value1", + vol_extra2="value2", + vol_extra3=3) + self.vol_type1['extra_specs'] = self.vol_type1_specs + ref = db.volume_type_create(self.context, self.vol_type1) + self.volume_type1_id = ref.id + for k, v in self.vol_type1_specs.iteritems(): + self.vol_type1_specs[k] = str(v) + + self.vol_type2_noextra = dict(name="TEST: Volume type without extra") + ref = db.volume_type_create(self.context, self.vol_type2_noextra) + self.vol_type2_id = ref.id + + def tearDown(self): + # Remove the volume type from the database + db.volume_type_destroy(context.get_admin_context(), + self.vol_type1['name']) + db.volume_type_destroy(context.get_admin_context(), + self.vol_type2_noextra['name']) + super(VolumeTypeExtraSpecsTestCase, self).tearDown() + + def test_volume_type_specs_get(self): + expected_specs = self.vol_type1_specs.copy() + actual_specs = db.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_extra_specs_delete(self): + expected_specs = self.vol_type1_specs.copy() + del expected_specs['vol_extra2'] + db.volume_type_extra_specs_delete(context.get_admin_context(), + self.volume_type1_id, + 'vol_extra2') + actual_specs = db.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_extra_specs_update(self): + expected_specs = self.vol_type1_specs.copy() + expected_specs['vol_extra3'] = "4" + db.volume_type_extra_specs_update_or_create( + context.get_admin_context(), + self.volume_type1_id, + dict(vol_extra3=4)) + actual_specs = db.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_extra_specs_create(self): + expected_specs = self.vol_type1_specs.copy() + expected_specs['vol_extra4'] = 'value4' + expected_specs['vol_extra5'] = 'value5' + db.volume_type_extra_specs_update_or_create( + context.get_admin_context(), + self.volume_type1_id, + dict(vol_extra4="value4", + vol_extra5="value5")) + actual_specs = db.volume_type_extra_specs_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(expected_specs, actual_specs) + + def test_volume_type_get_with_extra_specs(self): + volume_type = db.volume_type_get( + context.get_admin_context(), + self.volume_type1_id) + self.assertEquals(volume_type['extra_specs'], + self.vol_type1_specs) + + volume_type = db.volume_type_get( + context.get_admin_context(), + self.vol_type2_id) + self.assertEquals(volume_type['extra_specs'], {}) + + def test_volume_type_get_by_name_with_extra_specs(self): + volume_type = db.volume_type_get_by_name( + context.get_admin_context(), + self.vol_type1['name']) + self.assertEquals(volume_type['extra_specs'], + self.vol_type1_specs) + + volume_type = db.volume_type_get_by_name( + context.get_admin_context(), + self.vol_type2_noextra['name']) + self.assertEquals(volume_type['extra_specs'], {}) + + def test_volume_type_get_all(self): + expected_specs = self.vol_type1_specs.copy() + + types = 
db.volume_type_get_all(context.get_admin_context()) + + self.assertEquals( + types[self.vol_type1['name']]['extra_specs'], expected_specs) + + self.assertEquals( + types[self.vol_type2_noextra['name']]['extra_specs'], {}) diff --git a/cinder/tests/test_wsgi.py b/cinder/tests/test_wsgi.py new file mode 100644 index 00000000000..cc8fb687411 --- /dev/null +++ b/cinder/tests/test_wsgi.py @@ -0,0 +1,92 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Unit tests for `cinder.wsgi`.""" + +import os.path +import tempfile + +import unittest + +import cinder.exception +from cinder import test +import cinder.wsgi + + +class TestLoaderNothingExists(test.TestCase): + """Loader tests where os.path.exists always returns False.""" + + def setUp(self): + super(TestLoaderNothingExists, self).setUp() + self.stubs.Set(os.path, 'exists', lambda _: False) + + def test_config_not_found(self): + self.assertRaises( + cinder.exception.ConfigNotFound, + cinder.wsgi.Loader, + ) + + +class TestLoaderNormalFilesystem(unittest.TestCase): + """Loader tests with normal filesystem (unmodified os.path module).""" + + _paste_config = """ +[app:test_app] +use = egg:Paste#static +document_root = /tmp + """ + + def setUp(self): + self.config = tempfile.NamedTemporaryFile(mode="w+t") + self.config.write(self._paste_config.lstrip()) + self.config.seek(0) + self.config.flush() + self.loader = cinder.wsgi.Loader(self.config.name) + + def test_config_found(self): + self.assertEquals(self.config.name, self.loader.config_path) + + def test_app_not_found(self): + self.assertRaises( + cinder.exception.PasteAppNotFound, + self.loader.load_app, + "non-existant app", + ) + + def test_app_found(self): + url_parser = self.loader.load_app("test_app") + self.assertEquals("/tmp", url_parser.directory) + + def tearDown(self): + self.config.close() + + +class TestWSGIServer(unittest.TestCase): + """WSGI server tests.""" + + def test_no_app(self): + server = cinder.wsgi.Server("test_app", None) + self.assertEquals("test_app", server.name) + + def test_start_random_port(self): + server = cinder.wsgi.Server("test_random_port", None, host="127.0.0.1") + self.assertEqual(0, server.port) + server.start() + self.assertNotEqual(0, server.port) + server.stop() + server.wait() diff --git a/cinder/tests/utils.py b/cinder/tests/utils.py new file mode 100644 index 00000000000..5b9086b7542 --- /dev/null +++ b/cinder/tests/utils.py @@ -0,0 +1,25 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# + +import cinder.context +import cinder.db +import cinder.flags + +FLAGS = cinder.flags.FLAGS + + +def get_test_admin_context(): + return cinder.context.get_admin_context() diff --git a/cinder/utils.py b/cinder/utils.py new file mode 100644 index 00000000000..f80fb143183 --- /dev/null +++ b/cinder/utils.py @@ -0,0 +1,1678 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Utilities and helper functions.""" + +import contextlib +import datetime +import errno +import functools +import hashlib +import inspect +import itertools +import json +import os +import pyclbr +import random +import re +import shlex +import shutil +import socket +import struct +import sys +import tempfile +import threading +import time +import types +import uuid +import warnings +from xml.sax import saxutils + +from eventlet import corolocal +from eventlet import event +from eventlet import greenthread +from eventlet import semaphore +from eventlet.green import subprocess +import iso8601 +import lockfile +import netaddr + +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder.openstack.common import importutils + + +LOG = logging.getLogger(__name__) +ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" +PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" +FLAGS = flags.FLAGS + +FLAGS.register_opt( + cfg.BoolOpt('disable_process_locking', default=False, + help='Whether to disable inter-process locks')) + + +def find_config(config_path): + """Find a configuration file using the given hint. + + :param config_path: Full or relative path to the config. + :returns: Full path of the config, if it exists. + :raises: `cinder.exception.ConfigNotFound` + + """ + possible_locations = [ + config_path, + os.path.join(FLAGS.state_path, "etc", "cinder", config_path), + os.path.join(FLAGS.state_path, "etc", config_path), + os.path.join(FLAGS.state_path, config_path), + "/etc/cinder/%s" % config_path, + ] + + for path in possible_locations: + if os.path.exists(path): + return os.path.abspath(path) + + raise exception.ConfigNotFound(path=os.path.abspath(config_path)) + + +def vpn_ping(address, port, timeout=0.05, session_id=None): + """Sends a vpn negotiation packet and returns the server session. + + Returns False on a failure. Basic packet structure is below. 
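+ + Example (illustrative; the address and port are made up):: + + server_sess = vpn_ping('192.168.0.2', 1194, timeout=0.1)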
+ + Client packet (14 bytes):: + + 0 1 8 9 13 + +-+--------+-----+ + |x| cli_id |?????| + +-+--------+-----+ + x = packet identifier 0x38 + cli_id = 64 bit identifier + ? = unknown, probably flags/padding + + Server packet (26 bytes):: + + 0 1 8 9 13 14 21 22 25 + +-+--------+-----+--------+----+ + |x| srv_id |?????| cli_id |????| + +-+--------+-----+--------+----+ + x = packet identifier 0x40 + cli_id = 64 bit identifier + ? = unknown, probably flags/padding + bit 9 was 1 and the rest were 0 in testing + + """ + if session_id is None: + session_id = random.randint(0, 0xffffffffffffffff) + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + data = struct.pack('!BQxxxxx', 0x38, session_id) + sock.sendto(data, (address, port)) + sock.settimeout(timeout) + try: + received = sock.recv(2048) + except socket.timeout: + return False + finally: + sock.close() + fmt = '!BQxxxxxQxxxx' + if len(received) != struct.calcsize(fmt): + print struct.calcsize(fmt) + return False + (identifier, server_sess, client_sess) = struct.unpack(fmt, received) + if identifier == 0x40 and client_sess == session_id: + return server_sess + + +def fetchfile(url, target): + LOG.debug(_('Fetching %s') % url) + execute('curl', '--fail', url, '-o', target) + + +def execute(*cmd, **kwargs): + """Helper method to execute command with optional retry. + + If you add a run_as_root=True command, don't forget to add the + corresponding filter to cinder.rootwrap! + + :param cmd: Passed to subprocess.Popen. + :param process_input: Send to opened process. + :param check_exit_code: Single bool, int, or list of allowed exit + codes. Defaults to [0]. Raise + exception.ProcessExecutionError unless + program exits with one of these codes. + :param delay_on_retry: True | False. Defaults to True. If set to + True, wait a short amount of time + before retrying. + :param attempts: How many times to retry cmd. + :param run_as_root: True | False. Defaults to False. If set to True, + the command is prefixed by the command specified + in the root_helper FLAG. + + :raises exception.Error: on receiving unknown arguments + :raises exception.ProcessExecutionError: + + :returns: a tuple, (stdout, stderr) from the spawned process, or None if + the command fails. 
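+ + Example (illustrative only; the command and values are made up):: + + out, err = execute('dd', 'if=/dev/zero', 'of=/tmp/scratch', + 'count=1', run_as_root=True, attempts=3)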
+ """ + + process_input = kwargs.pop('process_input', None) + check_exit_code = kwargs.pop('check_exit_code', [0]) + ignore_exit_code = False + if isinstance(check_exit_code, bool): + ignore_exit_code = not check_exit_code + check_exit_code = [0] + elif isinstance(check_exit_code, int): + check_exit_code = [check_exit_code] + delay_on_retry = kwargs.pop('delay_on_retry', True) + attempts = kwargs.pop('attempts', 1) + run_as_root = kwargs.pop('run_as_root', False) + shell = kwargs.pop('shell', False) + + if len(kwargs): + raise exception.Error(_('Got unknown keyword args ' + 'to utils.execute: %r') % kwargs) + + if run_as_root: + cmd = shlex.split(FLAGS.root_helper) + list(cmd) + cmd = map(str, cmd) + + while attempts > 0: + attempts -= 1 + try: + LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) + _PIPE = subprocess.PIPE # pylint: disable=E1101 + obj = subprocess.Popen(cmd, + stdin=_PIPE, + stdout=_PIPE, + stderr=_PIPE, + close_fds=True, + shell=shell) + result = None + if process_input is not None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + obj.stdin.close() # pylint: disable=E1101 + _returncode = obj.returncode # pylint: disable=E1101 + if _returncode: + LOG.debug(_('Result was %s') % _returncode) + if not ignore_exit_code and _returncode not in check_exit_code: + (stdout, stderr) = result + raise exception.ProcessExecutionError( + exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) + return result + except exception.ProcessExecutionError: + if not attempts: + raise + else: + LOG.debug(_('%r failed. Retrying.'), cmd) + if delay_on_retry: + greenthread.sleep(random.randint(20, 200) / 100.0) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) + + +def trycmd(*args, **kwargs): + """ + A wrapper around execute() to more easily handle warnings and errors. + + Returns an (out, err) tuple of strings containing the output of + the command's stdout and stderr. If 'err' is not empty then the + command can be considered to have failed. + + :discard_warnings True | False. Defaults to False. If set to True, + then for succeeding commands, stderr is cleared + + """ + discard_warnings = kwargs.pop('discard_warnings', False) + + try: + out, err = execute(*args, **kwargs) + failed = False + except exception.ProcessExecutionError, exn: + out, err = '', str(exn) + LOG.debug(err) + failed = True + + if not failed and discard_warnings and err: + # Handle commands that output to stderr but otherwise succeed + LOG.debug(err) + err = '' + + return out, err + + +def ssh_execute(ssh, cmd, process_input=None, + addl_env=None, check_exit_code=True): + LOG.debug(_('Running cmd (SSH): %s'), ' '.join(cmd)) + if addl_env: + raise exception.Error(_('Environment not supported over SSH')) + + if process_input: + # This is (probably) fixable if we need it... + raise exception.Error(_('process_input not supported over SSH')) + + stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) + channel = stdout_stream.channel + + #stdin.write('process_input would go here') + #stdin.flush() + + # NOTE(justinsb): This seems suspicious... 
+ # ...other SSH clients have buffering issues with this approach + stdout = stdout_stream.read() + stderr = stderr_stream.read() + stdin_stream.close() + + exit_status = channel.recv_exit_status() + + # exit_status == -1 if no exit code was returned + if exit_status != -1: + LOG.debug(_('Result was %s') % exit_status) + if check_exit_code and exit_status != 0: + raise exception.ProcessExecutionError(exit_code=exit_status, + stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) + + return (stdout, stderr) + + +def cinderdir(): + import cinder + return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0] + + +def default_flagfile(filename='cinder.conf', args=None): + if args is None: + args = sys.argv + for arg in args: + if arg.find('flagfile') != -1: + return arg[arg.index('flagfile') + len('flagfile') + 1:] + else: + if not os.path.isabs(filename): + # turn relative filename into an absolute path + script_dir = os.path.dirname(inspect.stack()[-1][1]) + filename = os.path.abspath(os.path.join(script_dir, filename)) + if not os.path.exists(filename): + filename = "./cinder.conf" + if not os.path.exists(filename): + filename = '/etc/cinder/cinder.conf' + if os.path.exists(filename): + flagfile = '--flagfile=%s' % filename + args.insert(1, flagfile) + return filename + + +def debug(arg): + LOG.debug(_('debug in callback: %s'), arg) + return arg + + +def generate_uid(topic, size=8): + characters = '01234567890abcdefghijklmnopqrstuvwxyz' + choices = [random.choice(characters) for x in xrange(size)] + return '%s-%s' % (topic, ''.join(choices)) + + +# Default symbols to use for passwords. Avoids visually confusing characters. +# ~6 bits per symbol +DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O + 'abcdefghijkmnopqrstuvwxyz') # Removed: l + + +# ~5 bits per symbol +EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O + + +def last_completed_audit_period(unit=None): + """This method gives you the most recently *completed* audit period. + + arguments: + units: string, one of 'hour', 'day', 'month', 'year' + Periods normally begin at the beginning (UTC) of the + period unit (So a 'day' period begins at midnight UTC, + a 'month' unit on the 1st, a 'year' on Jan, 1) + unit string may be appended with an optional offset + like so: 'day@18' This will begin the period at 18:00 + UTC. 'month@15' starts a monthly period on the 15th, + and year@3 begins a yearly one on March 1st. 
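+ + For example (values matching the audit-period unit tests in this + patch), with the current time at 2012-03-05 08:12 UTC: unit='hour' + yields (2012-03-05 07:00, 2012-03-05 08:00) and unit='day@6' yields + (2012-03-04 06:00, 2012-03-05 06:00).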
+ + + returns: 2 tuple of datetimes (begin, end) + The begin timestamp of this audit period is the same as the + end of the previous.""" + if not unit: + unit = FLAGS.instance_usage_audit_period + + offset = 0 + if '@' in unit: + unit, offset = unit.split("@", 1) + offset = int(offset) + + rightnow = utcnow() + if unit not in ('month', 'day', 'year', 'hour'): + raise ValueError('Time period must be hour, day, month or year') + if unit == 'month': + if offset == 0: + offset = 1 + end = datetime.datetime(day=offset, + month=rightnow.month, + year=rightnow.year) + if end >= rightnow: + year = rightnow.year + if 1 >= rightnow.month: + year -= 1 + month = 12 + (rightnow.month - 1) + else: + month = rightnow.month - 1 + end = datetime.datetime(day=offset, + month=month, + year=year) + year = end.year + if 1 >= end.month: + year -= 1 + month = 12 + (end.month - 1) + else: + month = end.month - 1 + begin = datetime.datetime(day=offset, month=month, year=year) + + elif unit == 'year': + if offset == 0: + offset = 1 + end = datetime.datetime(day=1, month=offset, year=rightnow.year) + if end >= rightnow: + end = datetime.datetime(day=1, + month=offset, + year=rightnow.year - 1) + begin = datetime.datetime(day=1, + month=offset, + year=rightnow.year - 2) + else: + begin = datetime.datetime(day=1, + month=offset, + year=rightnow.year - 1) + + elif unit == 'day': + end = datetime.datetime(hour=offset, + day=rightnow.day, + month=rightnow.month, + year=rightnow.year) + if end >= rightnow: + end = end - datetime.timedelta(days=1) + begin = end - datetime.timedelta(days=1) + + elif unit == 'hour': + end = rightnow.replace(minute=offset, second=0, microsecond=0) + if end >= rightnow: + end = end - datetime.timedelta(hours=1) + begin = end - datetime.timedelta(hours=1) + + return (begin, end) + + +def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): + """Generate a random password from the supplied symbol groups. + + At least one symbol from each group will be included. Unpredictable + results if length is less than the number of symbol groups. + + Believed to be reasonably secure (with a reasonable password length!) + + """ + r = random.SystemRandom() + + # NOTE(jerdfelt): Some password policies require at least one character + # from each group of symbols, so start off with one random character + # from each symbol group + password = [r.choice(s) for s in symbolgroups] + # If length < len(symbolgroups), the leading characters will only + # be from the first length groups. Try our best to not be predictable + # by shuffling and then truncating. 
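+ # (e.g. for length=2 with the three DEFAULT_PASSWORD_SYMBOLS groups, + # the shuffle below decides which two of the three mandatory + # characters survive the truncation)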
+ r.shuffle(password) + password = password[:length] + length -= len(password) + + # then fill with random characters from all symbol groups + symbols = ''.join(symbolgroups) + password.extend([r.choice(symbols) for _i in xrange(length)]) + + # finally shuffle to ensure first x characters aren't from a + # predictable group + r.shuffle(password) + + return ''.join(password) + + +def last_octet(address): + return int(address.split('.')[-1]) + + +def get_my_linklocal(interface): + try: + if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface) + condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link' + links = [re.search(condition, x) for x in if_str[0].split('\n')] + address = [w.group(1) for w in links if w is not None] + if address[0] is not None: + return address[0] + else: + raise exception.Error(_('Link Local address is not found.:%s') + % if_str) + except Exception as ex: + raise exception.Error(_("Couldn't get Link Local IP of %(interface)s" + " :%(ex)s") % locals()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + return utcnow.override_time + return datetime.datetime.utcnow() + + +utcnow.override_time = None + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + return time.mktime(utcnow().timetuple()) + + +def set_time_override(override_time=datetime.datetime.utcnow()): + """Override utils.utcnow to return a constant time.""" + utcnow.override_time = override_time + + +def advance_time_delta(timedelta): + """Advance overriden time using a datetime.timedelta.""" + assert(not utcnow.override_time is None) + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overriden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def isotime(at=None): + """Stringify time in ISO 8601 format""" + if not at: + at = datetime.datetime.utcnow() + str = at.strftime(ISO_TIME_FORMAT) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + str += ('Z' if tz == 'UTC' else tz) + return str + + +def parse_isotime(timestr): + """Turn an iso formatted time back into a datetime.""" + try: + return iso8601.parse_date(timestr) + except (iso8601.ParseError, TypeError) as e: + raise ValueError(e.message) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC""" + offset = timestamp.utcoffset() + return timestamp.replace(tzinfo=None) - offset if offset else timestamp + + +def parse_mailmap(mailmap='.mailmap'): + mapping = {} + if os.path.exists(mailmap): + fp = open(mailmap, 'r') + for l in fp: + l = l.strip() + if not l.startswith('#') and ' ' in l: + canonical_email, alias = l.split(' ') + mapping[alias.lower()] = canonical_email.lower() + return mapping + + +def str_dict_replace(s, mapping): + for s1, s2 in mapping.iteritems(): + s = s.replace(s1, s2) + return s + + +class LazyPluggable(object): + """A pluggable backend loaded lazily based on some value.""" + + def __init__(self, pivot, 
**backends): + self.__backends = backends + self.__pivot = pivot + self.__backend = None + + def __get_backend(self): + if not self.__backend: + backend_name = FLAGS[self.__pivot] + if backend_name not in self.__backends: + raise exception.Error(_('Invalid backend: %s') % backend_name) + + backend = self.__backends[backend_name] + if isinstance(backend, tuple): + name = backend[0] + fromlist = backend[1] + else: + name = backend + fromlist = backend + + self.__backend = __import__(name, None, None, fromlist) + LOG.debug(_('backend %s'), self.__backend) + return self.__backend + + def __getattr__(self, key): + backend = self.__get_backend() + return getattr(backend, key) + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. + + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCall.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCall.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCall(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + self.f(*self.args, **self.kw) + if not self._running: + break + greenthread.sleep(interval) + except LoopingCallDone, e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_('in looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + +def xhtml_escape(value): + """Escapes a string so it is valid within XML or XHTML. + + """ + return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'}) + + +def utf8(value): + """Try to turn a string into utf-8 if possible. + + Code is directly from the utf8 function in + http://github.com/facebook/tornado/blob/master/tornado/escape.py + + """ + if isinstance(value, unicode): + return value.encode('utf-8') + assert isinstance(value, str) + return value + + +def to_primitive(value, convert_instances=False, level=0): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. + + Therefore, convert_instances=True is lossy ... be aware. + + """ + nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod, + inspect.isfunction, inspect.isgeneratorfunction, + inspect.isgenerator, inspect.istraceback, inspect.isframe, + inspect.iscode, inspect.isbuiltin, inspect.isroutine, + inspect.isabstract] + for test in nasty: + if test(value): + return unicode(value) + + # value of itertools.count doesn't get caught by inspects + # above and results in infinite loop when list(value) is called.
+ if type(value) == itertools.count: + return unicode(value) + + # FIXME(vish): Workaround for LP bug 852095. Without this workaround, + # tests that raise an exception in a mocked method that + # has a @wrap_exception with a notifier will fail. If + # we up the dependency to 0.5.4 (when it is released) we + # can remove this workaround. + if getattr(value, '__module__', None) == 'mox': + return 'mock' + + if level > 3: + return '?' + + # The try block may not be necessary after the class check above, + # but just in case ... + try: + if isinstance(value, (list, tuple)): + o = [] + for v in value: + o.append(to_primitive(v, convert_instances=convert_instances, + level=level)) + return o + elif isinstance(value, dict): + o = {} + for k, v in value.iteritems(): + o[k] = to_primitive(v, convert_instances=convert_instances, + level=level) + return o + elif isinstance(value, datetime.datetime): + return str(value) + elif hasattr(value, 'iteritems'): + return to_primitive(dict(value.iteritems()), + convert_instances=convert_instances, + level=level) + elif hasattr(value, '__iter__'): + return to_primitive(list(value), + convert_instances=convert_instances, + level=level) + elif convert_instances and hasattr(value, '__dict__'): + # Likely an instance of something. Watch for cycles. + # Ignore class member vars. + return to_primitive(value.__dict__, + convert_instances=convert_instances, + level=level + 1) + else: + return value + except TypeError, e: + # Class objects are tricky since they may define something like + # __iter__ defined but it isn't callable as list(). + return unicode(value) + + +def dumps(value): + try: + return json.dumps(value) + except TypeError: + pass + return json.dumps(to_primitive(value)) + + +def loads(s): + return json.loads(s) + + +try: + import anyjson +except ImportError: + pass +else: + anyjson._modules.append(("cinder.utils", "dumps", TypeError, + "loads", ValueError)) + anyjson.force_implementation("cinder.utils") + + +class GreenLockFile(lockfile.FileLock): + """Implementation of lockfile that allows for a lock per greenthread. + + Simply implements lockfile:LockBase init with an additional suffix + on the unique name: the greenthread identifier. + """ + def __init__(self, path, threaded=True): + self.path = path + self.lock_file = os.path.abspath(path) + ".lock" + self.hostname = socket.gethostname() + self.pid = os.getpid() + if threaded: + t = threading.current_thread() + # Thread objects in Python 2.4 and earlier do not have ident + # attrs. Work around that. + ident = getattr(t, "ident", hash(t)) or hash(t) + gident = corolocal.get_ident() + self.tname = "-%x-%x" % (ident & 0xffffffff, gident & 0xffffffff) + else: + self.tname = "" + dirname = os.path.dirname(self.lock_file) + self.unique_name = os.path.join(dirname, + "%s%s.%s" % (self.hostname, + self.tname, + self.pid)) + + +_semaphores = {} + + +def synchronized(name, external=False): + """Synchronization decorator. + + Decorating a method like so:: + + @synchronized('mylock') + def foo(self, *args): + ... + + ensures that only one thread will execute the foo method at a time. + + Different methods can share the same lock:: + + @synchronized('mylock') + def foo(self, *args): + ... + + @synchronized('mylock') + def bar(self, *args): + ... + + This way only one of either foo or bar can be executing at a time. + + The external keyword argument denotes whether this lock should work across + multiple processes. 
This means that if two different workers both run
+    a method decorated with @synchronized('mylock', external=True), only one
+    of them will execute at a time.
+
+    Important limitation: you can only have one external lock running per
+    thread at a time. For example the following will fail:
+
+        @utils.synchronized('testlock1', external=True)
+        def outer_lock():
+
+            @utils.synchronized('testlock2', external=True)
+            def inner_lock():
+                pass
+            inner_lock()
+
+        outer_lock()
+
+    """
+
+    def wrap(f):
+        @functools.wraps(f)
+        def inner(*args, **kwargs):
+            # NOTE(soren): If we ever go natively threaded, this will be racy.
+            #              See http://stackoverflow.com/questions/5390569/dyn
+            #              amically-allocating-and-destroying-mutexes
+            if name not in _semaphores:
+                _semaphores[name] = semaphore.Semaphore()
+            sem = _semaphores[name]
+            LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method '
+                        '"%(method)s"...'), {'lock': name,
+                                             'method': f.__name__})
+            with sem:
+                LOG.debug(_('Got semaphore "%(lock)s" for method '
+                            '"%(method)s"...'), {'lock': name,
+                                                 'method': f.__name__})
+                if external and not FLAGS.disable_process_locking:
+                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
+                                'method "%(method)s"...'),
+                              {'lock': name, 'method': f.__name__})
+                    lock_file_path = os.path.join(FLAGS.lock_path,
+                                                  'cinder-%s' % name)
+                    lock = GreenLockFile(lock_file_path)
+                    with lock:
+                        LOG.debug(_('Got file lock "%(lock)s" for '
+                                    'method "%(method)s"...'),
+                                  {'lock': name, 'method': f.__name__})
+                        retval = f(*args, **kwargs)
+                else:
+                    retval = f(*args, **kwargs)
+
+            # If no-one else is waiting for it, delete it.
+            # See note about possible raciness above.
+            if not sem.balance < 1:
+                del _semaphores[name]
+
+            return retval
+        return inner
+    return wrap
+
+
+def cleanup_file_locks():
+    """Clean up stale locks left behind by process failures.
+
+    The lockfile module, used by @synchronized, can leave stale lockfiles
+    behind after process failure. These locks can cause process hangs
+    at startup, when a process deadlocks on a lock which will never
+    be unlocked.
+
+    Intended to be called at service startup.
+
+    """
+
+    # NOTE(mikeyp) this routine incorporates some internal knowledge
+    #              from the lockfile module, and this logic really
+    #              should be part of that module.
+    #
+    # cleanup logic:
+    # 1) look for the lockfile module's 'sentinel' files, of the form
+    #    hostname.[thread-.*]-pid, and extract the pid.
+    #    if the pid doesn't match a running process, delete the file since
+    #    it's from a dead process.
+    # 2) check for the actual lockfiles. if a lockfile exists with a link
+    #    count of 1, it's bogus, so delete it. A link count >= 2 indicates
+    #    that there are probably sentinels still linked to it from active
+    #    processes.  This check isn't perfect, but there is no way to
+    #    reliably tell which sentinels refer to which lock in the
+    #    lockfile implementation.
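+    #
+    # NOTE(editor): step 1 relies on the standard os.kill(pid, 0) probe --
+    # signal 0 delivers nothing but raises OSError when no such process
+    # exists. A sketch (pid hypothetical):
+    #
+    #     try:
+    #         os.kill(4242, 0)
+    #     except OSError:
+    #         pass  # process is gone, so its sentinel file is stale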
+ + if FLAGS.disable_process_locking: + return + + hostname = socket.gethostname() + sentinel_re = hostname + r'\..*-(\d+$)' + lockfile_re = r'cinder-.*\.lock' + files = os.listdir(FLAGS.lock_path) + + # cleanup sentinels + for filename in files: + match = re.match(sentinel_re, filename) + if match is None: + continue + pid = match.group(1) + LOG.debug(_('Found sentinel %(filename)s for pid %(pid)s'), + {'filename': filename, 'pid': pid}) + try: + os.kill(int(pid), 0) + except OSError, e: + # PID wasn't found + delete_if_exists(os.path.join(FLAGS.lock_path, filename)) + LOG.debug(_('Cleaned sentinel %(filename)s for pid %(pid)s'), + {'filename': filename, 'pid': pid}) + + # cleanup lock files + for filename in files: + match = re.match(lockfile_re, filename) + if match is None: + continue + try: + stat_info = os.stat(os.path.join(FLAGS.lock_path, filename)) + except OSError as e: + if e.errno == errno.ENOENT: + continue + else: + raise + LOG.debug(_('Found lockfile %(file)s with link count %(count)d'), + {'file': filename, 'count': stat_info.st_nlink}) + if stat_info.st_nlink == 1: + delete_if_exists(os.path.join(FLAGS.lock_path, filename)) + LOG.debug(_('Cleaned lockfile %(file)s with link count %(count)d'), + {'file': filename, 'count': stat_info.st_nlink}) + + +def delete_if_exists(pathname): + """delete a file, but ignore file not found error""" + + try: + os.unlink(pathname) + except OSError as e: + if e.errno == errno.ENOENT: + return + else: + raise + + +def get_from_path(items, path): + """Returns a list of items matching the specified path. + + Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item + in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the + intermediate results are lists it will treat each list item individually. + A 'None' in items or any child expressions will be ignored, this function + will not throw because of None (anywhere) in items. The returned list + will contain no None values. 
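+
+    For example, get_from_path([{'a': {'b': 1}}, {'a': None}], 'a/b')
+    returns [1]: the None child is skipped and the nested lookup is
+    applied to each remaining item.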
+ + """ + if path is None: + raise exception.Error('Invalid mini_xpath') + + (first_token, sep, remainder) = path.partition('/') + + if first_token == '': + raise exception.Error('Invalid mini_xpath') + + results = [] + + if items is None: + return results + + if not isinstance(items, list): + # Wrap single objects in a list + items = [items] + + for item in items: + if item is None: + continue + get_method = getattr(item, 'get', None) + if get_method is None: + continue + child = get_method(first_token) + if child is None: + continue + if isinstance(child, list): + # Flatten intermediate lists + for x in child: + results.append(x) + else: + results.append(child) + + if not sep: + # No more tokens + return results + else: + return get_from_path(results, remainder) + + +def flatten_dict(dict_, flattened=None): + """Recursively flatten a nested dictionary.""" + flattened = flattened or {} + for key, value in dict_.iteritems(): + if hasattr(value, 'iteritems'): + flatten_dict(value, flattened) + else: + flattened[key] = value + return flattened + + +def partition_dict(dict_, keys): + """Return two dicts, one with `keys` the other with everything else.""" + intersection = {} + difference = {} + for key, value in dict_.iteritems(): + if key in keys: + intersection[key] = value + else: + difference[key] = value + return intersection, difference + + +def map_dict_keys(dict_, key_map): + """Return a dict in which the dictionaries keys are mapped to new keys.""" + mapped = {} + for key, value in dict_.iteritems(): + mapped_key = key_map[key] if key in key_map else key + mapped[mapped_key] = value + return mapped + + +def subset_dict(dict_, keys): + """Return a dict that only contains a subset of keys.""" + subset = partition_dict(dict_, keys)[0] + return subset + + +def check_isinstance(obj, cls): + """Checks that obj is of type cls, and lets PyLint infer types.""" + if isinstance(obj, cls): + return obj + raise Exception(_('Expected object of type: %s') % (str(cls))) + # TODO(justinsb): Can we make this better?? + return cls() # Ugly PyLint hack + + +def parse_server_string(server_str): + """ + Parses the given server_string and returns a list of host and port. + If it's not a combination of host part and port, the port element + is a null string. If the input is invalid expression, return a null + list. + """ + try: + # First of all, exclude pure IPv6 address (w/o port). + if netaddr.valid_ipv6(server_str): + return (server_str, '') + + # Next, check if this is IPv6 address with a port number combination. 
+
+        # Next, check if this is an IPv6 address and port combination.
+        if server_str.find("]:") != -1:
+            (address, port) = server_str.replace('[', '', 1).split(']:')
+            return (address, port)
+
+        # Third, check if this is a combination of an address and a port
+        if server_str.find(':') == -1:
+            return (server_str, '')
+
+        # This must be a combination of an address and a port
+        (address, port) = server_str.split(':')
+        return (address, port)
+
+    except Exception:
+        LOG.debug(_('Invalid server_string: %s'), server_str)
+        return ('', '')
+
+
+def gen_uuid():
+    return uuid.uuid4()
+
+
+def is_uuid_like(val):
+    """For our purposes, a UUID is a string in canonical form:
+
+        aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
+    """
+    try:
+        uuid.UUID(val)
+        return True
+    except (TypeError, ValueError, AttributeError):
+        return False
+
+
+def bool_from_str(val):
+    """Convert a string representation of a bool into a bool value."""
+
+    if not val:
+        return False
+    try:
+        return True if int(val) else False
+    except ValueError:
+        return val.lower() == 'true'
+
+
+def is_valid_ipv4(address):
+    """Validate the address strictly, as per the format xxx.xxx.xxx.xxx,
+    where xxx is a value between 0 and 255.
+    """
+    parts = address.split(".")
+    if len(parts) != 4:
+        return False
+    for item in parts:
+        try:
+            if not 0 <= int(item) <= 255:
+                return False
+        except ValueError:
+            return False
+    return True
+
+
+def is_valid_cidr(address):
+    """Check whether the provided IPv4 or IPv6 address is a valid
+    CIDR address."""
+    try:
+        # Validate the correct CIDR Address
+        netaddr.IPNetwork(address)
+    except netaddr.core.AddrFormatError:
+        return False
+    except UnboundLocalError:
+        # NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in
+        # https://github.com/drkjam/netaddr/issues/2)
+        return False
+
+    # The prior validation only partially verifies the /xx part,
+    # so verify it here.
+    ip_segment = address.split('/')
+
+    if (len(ip_segment) <= 1 or
+        ip_segment[1] == ''):
+        return False
+
+    return True
+
+
+def monkey_patch():
+    """If FLAGS.monkey_patch is set to True, this function patches a
+    decorator onto all functions in the specified modules.
+    You can set decorators for each module
+    using FLAGS.monkey_patch_modules.
+    The format is "Module path:Decorator function".
+    Example: 'cinder.api.ec2.cloud:cinder.notifier.api.notify_decorator'
+
+    The parameters of the decorator are as follows.
+    (See cinder.notifier.api.notify_decorator)
+
+    name - name of the function
+    function - object of the function
+    """
+    # If FLAGS.monkey_patch is not True, this function does nothing.
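+    #
+    # NOTE(editor): a sketch of the flag settings this expects (the exact
+    # syntax is hypothetical; the module:decorator pair is the docstring's
+    # own example):
+    #
+    #   --monkey_patch=True
+    #   --monkey_patch_modules=cinder.api.ec2.cloud:cinder.notifier.api.notify_decorator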
+ if not FLAGS.monkey_patch: + return + # Get list of modules and decorators + for module_and_decorator in FLAGS.monkey_patch_modules: + module, decorator_name = module_and_decorator.split(':') + # import decorator function + decorator = importutils.import_class(decorator_name) + __import__(module) + # Retrieve module information using pyclbr + module_data = pyclbr.readmodule_ex(module) + for key in module_data.keys(): + # set the decorator for the class methods + if isinstance(module_data[key], pyclbr.Class): + clz = importutils.import_class("%s.%s" % (module, key)) + for method, func in inspect.getmembers(clz, inspect.ismethod): + setattr(clz, method, + decorator("%s.%s.%s" % (module, key, method), func)) + # set the decorator for the function + if isinstance(module_data[key], pyclbr.Function): + func = importutils.import_class("%s.%s" % (module, key)) + setattr(sys.modules[module], key, + decorator("%s.%s" % (module, key), func)) + + +def convert_to_list_dict(lst, label): + """Convert a value or list into a list of dicts""" + if not lst: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] + + +def timefunc(func): + """Decorator that logs how long a particular function took to execute""" + @functools.wraps(func) + def inner(*args, **kwargs): + start_time = time.time() + try: + return func(*args, **kwargs) + finally: + total_time = time.time() - start_time + LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") % + dict(name=func.__name__, total_time=total_time)) + return inner + + +def generate_glance_url(): + """Generate the URL to glance.""" + # TODO(jk0): This will eventually need to take SSL into consideration + # when supported in glance. + return "http://%s:%d" % (FLAGS.glance_host, FLAGS.glance_port) + + +@contextlib.contextmanager +def save_and_reraise_exception(): + """Save current exception, run some code and then re-raise. + + In some cases the exception context can be cleared, resulting in None + being attempted to be reraised after an exception handler is run. This + can happen when eventlet switches greenthreads or when running an + exception handler, code raises and catches an exception. In both + cases the exception context will be cleared. + + To work around this, we save the exception state, run handler code, and + then re-raise the original exception. If another exception occurs, the + saved exception is logged and the new exception is reraised. + """ + type_, value, traceback = sys.exc_info() + try: + yield + except Exception: + # NOTE(jkoelker): Using LOG.error here since it accepts exc_info + # as a kwargs. + LOG.error(_('Original exception being dropped'), + exc_info=(type_, value, traceback)) + raise + raise type_, value, traceback + + +@contextlib.contextmanager +def logging_error(message): + """Catches exception, write message to the log, re-raise. + This is a common refinement of save_and_reraise that writes a specific + message to the log. + """ + try: + yield + except Exception as error: + with save_and_reraise_exception(): + LOG.exception(message) + + +@contextlib.contextmanager +def remove_path_on_error(path): + """Protect code that wants to operate on PATH atomically. + Any exception will cause PATH to be removed. + """ + try: + yield + except Exception: + with save_and_reraise_exception(): + delete_if_exists(path) + + +def make_dev_path(dev, partition=None, base='/dev'): + """Return a path to a particular device. 
+ + >>> make_dev_path('xvdc') + /dev/xvdc + + >>> make_dev_path('xvdc', 1) + /dev/xvdc1 + """ + path = os.path.join(base, dev) + if partition: + path += str(partition) + return path + + +def total_seconds(td): + """Local total_seconds implementation for compatibility with python 2.6""" + if hasattr(td, 'total_seconds'): + return td.total_seconds() + else: + return ((td.days * 86400 + td.seconds) * 10 ** 6 + + td.microseconds) / 10.0 ** 6 + + +def sanitize_hostname(hostname): + """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" + if isinstance(hostname, unicode): + hostname = hostname.encode('latin-1', 'ignore') + + hostname = re.sub('[ _]', '-', hostname) + hostname = re.sub('[^\w.-]+', '', hostname) + hostname = hostname.lower() + hostname = hostname.strip('.-') + + return hostname + + +def read_cached_file(filename, cache_info, reload_func=None): + """Read from a file if it has been modified. + + :param cache_info: dictionary to hold opaque cache. + :param reload_func: optional function to be called with data when + file is reloaded due to a modification. + + :returns: data from file + + """ + mtime = os.path.getmtime(filename) + if not cache_info or mtime != cache_info.get('mtime'): + with open(filename) as fap: + cache_info['data'] = fap.read() + cache_info['mtime'] = mtime + if reload_func: + reload_func(cache_info['data']) + return cache_info['data'] + + +def hash_file(file_like_object): + """Generate a hash for the contents of a file.""" + checksum = hashlib.sha1() + any(map(checksum.update, iter(lambda: file_like_object.read(32768), ''))) + return checksum.hexdigest() + + +@contextlib.contextmanager +def temporary_mutation(obj, **kwargs): + """Temporarily set the attr on a particular object to a given value then + revert when finished. + + One use of this is to temporarily set the read_deleted flag on a context + object: + + with temporary_mutation(context, read_deleted="yes"): + do_something_that_needed_deleted_objects() + """ + NOT_PRESENT = object() + + old_values = {} + for attr, new_value in kwargs.items(): + old_values[attr] = getattr(obj, attr, NOT_PRESENT) + setattr(obj, attr, new_value) + + try: + yield + finally: + for attr, old_value in old_values.items(): + if old_value is NOT_PRESENT: + del obj[attr] + else: + setattr(obj, attr, old_value) + + +def warn_deprecated_class(cls, msg): + """ + Issues a warning to indicate that the given class is deprecated. + If a message is given, it is appended to the deprecation warning. + """ + + fullname = '%s.%s' % (cls.__module__, cls.__name__) + if msg: + fullmsg = _("Class %(fullname)s is deprecated: %(msg)s") + else: + fullmsg = _("Class %(fullname)s is deprecated") + + # Issue the warning + warnings.warn(fullmsg % locals(), DeprecationWarning, stacklevel=3) + + +def warn_deprecated_function(func, msg): + """ + Issues a warning to indicate that the given function is + deprecated. If a message is given, it is appended to the + deprecation warning. 
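+
+    For example, with msg='use new_helper() instead' (a hypothetical
+    message), the emitted warning reads roughly "Function old_helper in
+    /path/to/module.py:42 is deprecated: use new_helper() instead".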
+ """ + + name = func.__name__ + + # Find the function's definition + sourcefile = inspect.getsourcefile(func) + + # Find the line number, if possible + if inspect.ismethod(func): + code = func.im_func.func_code + else: + code = func.func_code + lineno = getattr(code, 'co_firstlineno', None) + + if lineno is None: + location = sourcefile + else: + location = "%s:%d" % (sourcefile, lineno) + + # Build up the message + if msg: + fullmsg = _("Function %(name)s in %(location)s is deprecated: %(msg)s") + else: + fullmsg = _("Function %(name)s in %(location)s is deprecated") + + # Issue the warning + warnings.warn(fullmsg % locals(), DeprecationWarning, stacklevel=3) + + +def _stubout(klass, message): + """ + Scans a class and generates wrapping stubs for __new__() and every + class and static method. Returns a dictionary which can be passed + to type() to generate a wrapping class. + """ + + overrides = {} + + def makestub_class(name, func): + """ + Create a stub for wrapping class methods. + """ + + def stub(cls, *args, **kwargs): + warn_deprecated_class(klass, message) + return func(*args, **kwargs) + + # Overwrite the stub's name + stub.__name__ = name + stub.func_name = name + + return classmethod(stub) + + def makestub_static(name, func): + """ + Create a stub for wrapping static methods. + """ + + def stub(*args, **kwargs): + warn_deprecated_class(klass, message) + return func(*args, **kwargs) + + # Overwrite the stub's name + stub.__name__ = name + stub.func_name = name + + return staticmethod(stub) + + for name, kind, _klass, _obj in inspect.classify_class_attrs(klass): + # We're only interested in __new__(), class methods, and + # static methods... + if (name != '__new__' and + kind not in ('class method', 'static method')): + continue + + # Get the function... + func = getattr(klass, name) + + # Override it in the class + if kind == 'class method': + stub = makestub_class(name, func) + elif kind == 'static method' or name == '__new__': + stub = makestub_static(name, func) + + # Save it in the overrides dictionary... + overrides[name] = stub + + # Apply the overrides + for name, stub in overrides.items(): + setattr(klass, name, stub) + + +def deprecated(message=''): + """ + Marks a function, class, or method as being deprecated. For + functions and methods, emits a warning each time the function or + method is called. For classes, generates a new subclass which + will emit a warning each time the class is instantiated, or each + time any class or static method is called. + + If a message is passed to the decorator, that message will be + appended to the emitted warning. This may be used to suggest an + alternate way of achieving the desired effect, or to explain why + the function, class, or method is deprecated. + """ + + def decorator(f_or_c): + # Make sure we can deprecate it... + if not callable(f_or_c) or isinstance(f_or_c, types.ClassType): + warnings.warn("Cannot mark object %r as deprecated" % f_or_c, + DeprecationWarning, stacklevel=2) + return f_or_c + + # If we're deprecating a class, create a subclass of it and + # stub out all the class and static methods + if inspect.isclass(f_or_c): + klass = f_or_c + _stubout(klass, message) + return klass + + # OK, it's a function; use a traditional wrapper... 
+ func = f_or_c + + @functools.wraps(func) + def wrapper(*args, **kwargs): + warn_deprecated_function(func, message) + + return func(*args, **kwargs) + + return wrapper + return decorator + + +def _showwarning(message, category, filename, lineno, file=None, line=None): + """ + Redirect warnings into logging. + """ + + fmtmsg = warnings.formatwarning(message, category, filename, lineno, line) + LOG.warning(fmtmsg) + + +# Install our warnings handler +warnings.showwarning = _showwarning + + +def service_is_up(service): + """Check whether a service is up based on last heartbeat.""" + last_heartbeat = service['updated_at'] or service['created_at'] + # Timestamps in DB are UTC. + elapsed = total_seconds(utcnow() - last_heartbeat) + return abs(elapsed) <= FLAGS.service_down_time + + +def generate_mac_address(): + """Generate an Ethernet MAC address.""" + # NOTE(vish): We would prefer to use 0xfe here to ensure that linux + # bridge mac addresses don't change, but it appears to + # conflict with libvirt, so we use the next highest octet + # that has the unicast and locally administered bits set + # properly: 0xfa. + # Discussion: https://bugs.launchpad.net/cinder/+bug/921838 + mac = [0xfa, 0x16, 0x3e, + random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + return ':'.join(map(lambda x: "%02x" % x, mac)) + + +def read_file_as_root(file_path): + """Secure helper to read file as root.""" + try: + out, _err = execute('cat', file_path, run_as_root=True) + return out + except exception.ProcessExecutionError: + raise exception.FileNotFound(file_path=file_path) + + +@contextlib.contextmanager +def temporary_chown(path, owner_uid=None): + """Temporarily chown a path. + + :params owner_uid: UID of temporary owner (defaults to current user) + """ + if owner_uid is None: + owner_uid = os.getuid() + + orig_uid = os.stat(path).st_uid + + if orig_uid != owner_uid: + execute('chown', owner_uid, path, run_as_root=True) + try: + yield + finally: + if orig_uid != owner_uid: + execute('chown', orig_uid, path, run_as_root=True) + + +@contextlib.contextmanager +def tempdir(**kwargs): + tmpdir = tempfile.mkdtemp(**kwargs) + try: + yield tmpdir + finally: + try: + shutil.rmtree(tmpdir) + except OSError, e: + LOG.debug(_('Could not remove tmpdir: %s'), str(e)) + + +def strcmp_const_time(s1, s2): + """Constant-time string comparison. + + :params s1: the first string + :params s2: the second string + + :return: True if the strings are equal. + + This function takes two strings and compares them. It is intended to be + used when doing a comparison for authentication purposes to help guard + against timing attacks. + """ + if len(s1) != len(s2): + return False + result = 0 + for (a, b) in zip(s1, s2): + result |= ord(a) ^ ord(b) + return result == 0 + + +class UndoManager(object): + """Provides a mechanism to facilitate rolling back a series of actions + when an exception is raised. + """ + def __init__(self): + self.undo_stack = [] + + def undo_with(self, undo_func): + self.undo_stack.append(undo_func) + + def _rollback(self): + for undo_func in reversed(self.undo_stack): + undo_func() + + def rollback_and_reraise(self, msg=None, **kwargs): + """Rollback a series of actions then re-raise the exception. + + .. note:: (sirp) This should only be called within an + exception handler. 
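+
+        A sketch of the intended usage (names hypothetical)::
+
+            undo_mgr = UndoManager()
+            try:
+                resource = create_resource()
+                undo_mgr.undo_with(lambda: delete_resource(resource))
+                attach_resource(resource)
+            except Exception:
+                undo_mgr.rollback_and_reraise(_('Failed to build resource'))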
+ """ + with save_and_reraise_exception(): + if msg: + LOG.exception(msg, **kwargs) + + self._rollback() diff --git a/cinder/version.py b/cinder/version.py new file mode 100644 index 00000000000..45b85c5e8bf --- /dev/null +++ b/cinder/version.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +CINDER_VERSION = ['2012', '2', None] +YEAR, COUNT, REVISION = CINDER_VERSION +FINAL = False # This becomes true at Release Candidate time + + +def canonical_version_string(): + return '.'.join(filter(None, CINDER_VERSION)) + + +def version_string(): + if FINAL: + return canonical_version_string() + else: + return '%s-dev' % (canonical_version_string(),) + + +def vcs_version_string(): + return 'LOCALBRANCH:LOCALREVISION' + + +def version_string_with_vcs(): + return '%s-%s' % (canonical_version_string(), vcs_version_string()) diff --git a/cinder/volume/__init__.py b/cinder/volume/__init__.py new file mode 100644 index 00000000000..e810a93d48f --- /dev/null +++ b/cinder/volume/__init__.py @@ -0,0 +1,25 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Importing full names to not pollute the namespace and cause possible +# collisions with use of 'from cinder.volume import ' elsewhere. +import cinder.flags +import cinder.openstack.common.importutils + +API = cinder.openstack.common.importutils.import_class( + cinder.flags.FLAGS.volume_api_class) diff --git a/cinder/volume/api.py b/cinder/volume/api.py new file mode 100644 index 00000000000..9c816e95bd8 --- /dev/null +++ b/cinder/volume/api.py @@ -0,0 +1,371 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to volumes. +""" + +import functools + +from eventlet import greenthread + +from cinder import exception +from cinder import flags +from cinder import log as logging +import cinder.policy +from cinder import quota +from cinder import rpc +from cinder import utils +from cinder.db import base + +FLAGS = flags.FLAGS +flags.DECLARE('storage_availability_zone', 'cinder.volume.manager') + +LOG = logging.getLogger(__name__) + + +def wrap_check_policy(func): + """Check policy corresponding to the wrapped methods prior to execution + + This decorator requires the first 3 args of the wrapped function + to be (self, context, volume) + """ + @functools.wraps(func) + def wrapped(self, context, target_obj, *args, **kwargs): + check_policy(context, func.__name__, target_obj) + return func(self, context, target_obj, *args, **kwargs) + + return wrapped + + +def check_policy(context, action, target_obj=None): + target = { + 'project_id': context.project_id, + 'user_id': context.user_id, + } + target.update(target_obj or {}) + _action = 'volume:%s' % action + cinder.policy.enforce(context, _action, target) + + +class API(base.Base): + """API for interacting with the volume manager.""" + + def create(self, context, size, name, description, snapshot=None, + volume_type=None, metadata=None, availability_zone=None): + check_policy(context, 'create') + if snapshot is not None: + if snapshot['status'] != "available": + msg = _("status must be available") + raise exception.InvalidSnapshot(reason=msg) + if not size: + size = snapshot['volume_size'] + + snapshot_id = snapshot['id'] + else: + snapshot_id = None + + if quota.allowed_volumes(context, 1, size) < 1: + pid = context.project_id + LOG.warn(_("Quota exceeded for %(pid)s, tried to create" + " %(size)sG volume") % locals()) + raise exception.QuotaError(code="VolumeSizeTooLarge") + + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + + if volume_type is None: + volume_type_id = None + else: + volume_type_id = volume_type.get('id', None) + + options = { + 'size': size, + 'user_id': context.user_id, + 'project_id': context.project_id, + 'snapshot_id': snapshot_id, + 'availability_zone': availability_zone, + 'status': "creating", + 'attach_status': "detached", + 'display_name': name, + 'display_description': description, + 'volume_type_id': volume_type_id, + 'metadata': metadata, + } + + volume = self.db.volume_create(context, options) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_volume", + "args": {"topic": FLAGS.volume_topic, + "volume_id": volume['id'], + "snapshot_id": snapshot_id}}) + return volume + + # TODO(yamahata): eliminate dumb polling + def wait_creation(self, context, volume): + volume_id = volume['id'] + while True: + volume = self.get(context, volume_id) + if volume['status'] != 'creating': + return + greenthread.sleep(1) + + @wrap_check_policy + def delete(self, context, volume): + volume_id = volume['id'] + if not volume['host']: + # NOTE(vish): scheduling failed, so delete it + self.db.volume_destroy(context, volume_id) + return + if volume['status'] not in ["available", "error"]: + msg = _("Volume status must be available or error") + raise exception.InvalidVolume(reason=msg) + + snapshots = self.db.snapshot_get_all_for_volume(context, volume_id) + if len(snapshots): + msg = _("Volume still has %d dependent snapshots") % len(snapshots) + raise 
exception.InvalidVolume(reason=msg) + + now = utils.utcnow() + self.db.volume_update(context, volume_id, {'status': 'deleting', + 'terminated_at': now}) + host = volume['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.volume_topic, host), + {"method": "delete_volume", + "args": {"volume_id": volume_id}}) + + @wrap_check_policy + def update(self, context, volume, fields): + self.db.volume_update(context, volume['id'], fields) + + def get(self, context, volume_id): + rv = self.db.volume_get(context, volume_id) + volume = dict(rv.iteritems()) + check_policy(context, 'get', volume) + return volume + + def get_all(self, context, search_opts={}): + check_policy(context, 'get_all') + if context.is_admin: + volumes = self.db.volume_get_all(context) + else: + volumes = self.db.volume_get_all_by_project(context, + context.project_id) + + if search_opts: + LOG.debug(_("Searching by: %s") % str(search_opts)) + + def _check_metadata_match(volume, searchdict): + volume_metadata = {} + for i in volume.get('volume_metadata'): + volume_metadata[i['key']] = i['value'] + + for k, v in searchdict.iteritems(): + if (k not in volume_metadata.keys() or + volume_metadata[k] != v): + return False + return True + + # search_option to filter_name mapping. + filter_mapping = {'metadata': _check_metadata_match} + + result = [] + for volume in volumes: + # go over all filters in the list + for opt, values in search_opts.iteritems(): + try: + filter_func = filter_mapping[opt] + except KeyError: + # no such filter - ignore it, go to next filter + continue + else: + if filter_func(volume, values): + result.append(volume) + break + volumes = result + return volumes + + def get_snapshot(self, context, snapshot_id): + check_policy(context, 'get_snapshot') + rv = self.db.snapshot_get(context, snapshot_id) + return dict(rv.iteritems()) + + def get_all_snapshots(self, context): + check_policy(context, 'get_all_snapshots') + if context.is_admin: + return self.db.snapshot_get_all(context) + return self.db.snapshot_get_all_by_project(context, context.project_id) + + @wrap_check_policy + def check_attach(self, context, volume): + # TODO(vish): abstract status checking? + if volume['status'] != "available": + msg = _("status must be available") + raise exception.InvalidVolume(reason=msg) + if volume['attach_status'] == "attached": + msg = _("already attached") + raise exception.InvalidVolume(reason=msg) + + @wrap_check_policy + def check_detach(self, context, volume): + # TODO(vish): abstract status checking? 
+ if volume['status'] == "available": + msg = _("already detached") + raise exception.InvalidVolume(reason=msg) + + def remove_from_compute(self, context, volume, instance_id, host): + """Remove volume from specified compute host.""" + rpc.call(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "remove_volume_connection", + "args": {'instance_id': instance_id, + 'volume_id': volume['id']}}) + + @wrap_check_policy + def reserve_volume(self, context, volume): + self.update(context, volume, {"status": "attaching"}) + + @wrap_check_policy + def unreserve_volume(self, context, volume): + if volume['status'] == "attaching": + self.update(context, volume, {"status": "available"}) + + @wrap_check_policy + def attach(self, context, volume, instance_id, mountpoint): + host = volume['host'] + queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) + return rpc.call(context, queue, + {"method": "attach_volume", + "args": {"volume_id": volume['id'], + "instance_id": instance_id, + "mountpoint": mountpoint}}) + + @wrap_check_policy + def detach(self, context, volume): + host = volume['host'] + queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) + return rpc.call(context, queue, + {"method": "detach_volume", + "args": {"volume_id": volume['id']}}) + + @wrap_check_policy + def initialize_connection(self, context, volume, connector): + host = volume['host'] + queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) + return rpc.call(context, queue, + {"method": "initialize_connection", + "args": {"volume_id": volume['id'], + "connector": connector}}) + + @wrap_check_policy + def terminate_connection(self, context, volume, connector): + self.unreserve_volume(context, volume) + host = volume['host'] + queue = self.db.queue_get_for(context, FLAGS.volume_topic, host) + return rpc.call(context, queue, + {"method": "terminate_connection", + "args": {"volume_id": volume['id'], + "connector": connector}}) + + def _create_snapshot(self, context, volume, name, description, + force=False): + check_policy(context, 'create_snapshot', volume) + + if ((not force) and (volume['status'] != "available")): + msg = _("must be available") + raise exception.InvalidVolume(reason=msg) + + options = { + 'volume_id': volume['id'], + 'user_id': context.user_id, + 'project_id': context.project_id, + 'status': "creating", + 'progress': '0%', + 'volume_size': volume['size'], + 'display_name': name, + 'display_description': description} + + snapshot = self.db.snapshot_create(context, options) + host = volume['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.volume_topic, host), + {"method": "create_snapshot", + "args": {"volume_id": volume['id'], + "snapshot_id": snapshot['id']}}) + return snapshot + + def create_snapshot(self, context, volume, name, description): + return self._create_snapshot(context, volume, name, description, + False) + + def create_snapshot_force(self, context, volume, name, description): + return self._create_snapshot(context, volume, name, description, + True) + + @wrap_check_policy + def delete_snapshot(self, context, snapshot): + if snapshot['status'] not in ["available", "error"]: + msg = _("Volume Snapshot status must be available or error") + raise exception.InvalidVolume(reason=msg) + self.db.snapshot_update(context, snapshot['id'], + {'status': 'deleting'}) + volume = self.db.volume_get(context, snapshot['volume_id']) + host = volume['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.volume_topic, host), + 
{"method": "delete_snapshot", + "args": {"snapshot_id": snapshot['id']}}) + + @wrap_check_policy + def get_volume_metadata(self, context, volume): + """Get all metadata associated with a volume.""" + rv = self.db.volume_metadata_get(context, volume['id']) + return dict(rv.iteritems()) + + @wrap_check_policy + def delete_volume_metadata(self, context, volume, key): + """Delete the given metadata item from an volume.""" + self.db.volume_metadata_delete(context, volume['id'], key) + + @wrap_check_policy + def update_volume_metadata(self, context, volume, metadata, delete=False): + """Updates or creates volume metadata. + + If delete is True, metadata items that are not specified in the + `metadata` argument will be deleted. + + """ + if delete: + _metadata = metadata + else: + _metadata = self.get_volume_metadata(context, volume['id']) + _metadata.update(metadata) + + self.db.volume_metadata_update(context, volume['id'], _metadata, True) + return _metadata + + def get_volume_metadata_value(self, volume, key): + """Get value of particular metadata key.""" + metadata = volume.get('volume_metadata') + if metadata: + for i in volume['volume_metadata']: + if i['key'] == key: + return i['value'] + return None diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py new file mode 100644 index 00000000000..5e640bd8b6b --- /dev/null +++ b/cinder/volume/driver.py @@ -0,0 +1,709 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Drivers for volumes. 
+ +""" + +import time + +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder import utils +from cinder.volume import iscsi + + +LOG = logging.getLogger(__name__) + +volume_opts = [ + cfg.StrOpt('volume_group', + default='cinder-volumes', + help='Name for the VG that will contain exported volumes'), + cfg.StrOpt('num_shell_tries', + default=3, + help='number of times to attempt to run flakey shell commands'), + cfg.StrOpt('num_iscsi_scan_tries', + default=3, + help='number of times to rescan iSCSI target to find volume'), + cfg.IntOpt('iscsi_num_targets', + default=100, + help='Number of iscsi target ids per host'), + cfg.StrOpt('iscsi_target_prefix', + default='iqn.2010-10.org.openstack:', + help='prefix for iscsi volumes'), + cfg.StrOpt('iscsi_ip_address', + default='$my_ip', + help='use this ip for iscsi'), + cfg.IntOpt('iscsi_port', + default=3260, + help='The port that the iSCSI daemon is listening on'), + cfg.StrOpt('rbd_pool', + default='rbd', + help='the RADOS pool in which rbd volumes are stored'), + cfg.StrOpt('rbd_user', + default=None, + help='the RADOS client name for accessing rbd volumes'), + cfg.StrOpt('rbd_secret_uuid', + default=None, + help='the libvirt uuid of the secret for the rbd_user' + 'volumes'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(volume_opts) + + +class VolumeDriver(object): + """Executes commands relating to Volumes.""" + def __init__(self, execute=utils.execute, *args, **kwargs): + # NOTE(vish): db is set by Manager + self.db = None + self.set_execute(execute) + + def set_execute(self, execute): + self._execute = execute + + def _try_execute(self, *command, **kwargs): + # NOTE(vish): Volume commands can partially fail due to timing, but + # running them a second time on failure will usually + # recover nicely. + tries = 0 + while True: + try: + self._execute(*command, **kwargs) + return True + except exception.ProcessExecutionError: + tries = tries + 1 + if tries >= FLAGS.num_shell_tries: + raise + LOG.exception(_("Recovering from a failed execute. 
" + "Try number %s"), tries) + time.sleep(tries ** 2) + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + out, err = self._execute('vgs', '--noheadings', '-o', 'name', + run_as_root=True) + volume_groups = out.split() + if not FLAGS.volume_group in volume_groups: + raise exception.Error(_("volume group %s doesn't exist") + % FLAGS.volume_group) + + def _create_volume(self, volume_name, sizestr): + self._try_execute('lvcreate', '-L', sizestr, '-n', + volume_name, FLAGS.volume_group, run_as_root=True) + + def _copy_volume(self, srcstr, deststr, size_in_g): + self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr, + 'count=%d' % (size_in_g * 1024), 'bs=1M', + run_as_root=True) + + def _volume_not_present(self, volume_name): + path_name = '%s/%s' % (FLAGS.volume_group, volume_name) + try: + self._try_execute('lvdisplay', path_name, run_as_root=True) + except Exception as e: + # If the volume isn't present + return True + return False + + def _delete_volume(self, volume, size_in_g): + """Deletes a logical volume.""" + # zero out old volumes to prevent data leaking between users + # TODO(ja): reclaiming space should be done lazy and low priority + self._copy_volume('/dev/zero', self.local_path(volume), size_in_g) + self._try_execute('lvremove', '-f', "%s/%s" % + (FLAGS.volume_group, + self._escape_snapshot(volume['name'])), + run_as_root=True) + + def _sizestr(self, size_in_g): + if int(size_in_g) == 0: + return '100M' + return '%sG' % size_in_g + + # Linux LVM reserves name that starts with snapshot, so that + # such volume name can't be created. Mangle it. + def _escape_snapshot(self, snapshot_name): + if not snapshot_name.startswith('snapshot'): + return snapshot_name + return '_' + snapshot_name + + def create_volume(self, volume): + """Creates a logical volume. Can optionally return a Dictionary of + changes to the volume object to be persisted.""" + self._create_volume(volume['name'], self._sizestr(volume['size'])) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + self._create_volume(volume['name'], self._sizestr(volume['size'])) + self._copy_volume(self.local_path(snapshot), self.local_path(volume), + snapshot['volume_size']) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + if self._volume_not_present(volume['name']): + # If the volume isn't present, then don't attempt to delete + return True + + # TODO(yamahata): lvm can't delete origin volume only without + # deleting derived snapshots. Can we do something fancy? 
+        out, err = self._execute('lvdisplay', '--noheading',
+                                 '-C', '-o', 'Attr',
+                                 '%s/%s' % (FLAGS.volume_group,
+                                            volume['name']),
+                                 run_as_root=True)
+        # fake_execute returns None resulting unit test error
+        if out:
+            out = out.strip()
+            if (out[0] == 'o') or (out[0] == 'O'):
+                raise exception.VolumeIsBusy(volume_name=volume['name'])
+
+        self._delete_volume(volume, volume['size'])
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot."""
+        orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name'])
+        self._try_execute('lvcreate', '-L',
+                          self._sizestr(snapshot['volume_size']),
+                          '--name', self._escape_snapshot(snapshot['name']),
+                          '--snapshot', orig_lv_name, run_as_root=True)
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
+            # If the snapshot isn't present, then don't attempt to delete
+            return True
+
+        # TODO(yamahata): zeroing out the whole snapshot triggers COW.
+        # it's quite slow.
+        self._delete_volume(snapshot, snapshot['volume_size'])
+
+    def local_path(self, volume):
+        # NOTE(vish): stops deprecation warning
+        escaped_group = FLAGS.volume_group.replace('-', '--')
+        escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
+        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        raise NotImplementedError()
+
+    def create_export(self, context, volume):
+        """Exports the volume. Can optionally return a Dictionary of changes
+        to the volume object to be persisted."""
+        raise NotImplementedError()
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+        raise NotImplementedError()
+
+    def check_for_export(self, context, volume_id):
+        """Make sure volume is exported."""
+        raise NotImplementedError()
+
+    def initialize_connection(self, volume, connector):
+        """Allow connection to connector and return connection info."""
+        raise NotImplementedError()
+
+    def terminate_connection(self, volume, connector):
+        """Disallow connection from connector."""
+        raise NotImplementedError()
+
+    def get_volume_stats(self, refresh=False):
+        """Return the current state of the volume service. If 'refresh' is
+        True, run the update first."""
+        return None
+
+    def do_setup(self, context):
+        """Any initialization the volume driver does while starting."""
+        pass
+
+
+class ISCSIDriver(VolumeDriver):
+    """Executes commands relating to ISCSI volumes.
+
+    We make use of model provider properties as follows:
+
+    ``provider_location``
+      if present, contains the iSCSI target information in the same
+      format as an ietadm discovery
+      i.e. '<ip>:<port>,<portal> <target IQN>'
+
+    ``provider_auth``
+      if present, contains a space-separated triple:
+      '<auth method> <auth username> <auth password>'.
+      `CHAP` is the only auth_method in use at the moment.
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.tgtadm = iscsi.get_target_admin()
+        super(ISCSIDriver, self).__init__(*args, **kwargs)
+
+    def set_execute(self, execute):
+        super(ISCSIDriver, self).set_execute(execute)
+        self.tgtadm.set_execute(execute)
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        try:
+            iscsi_target = self.db.volume_get_iscsi_target_num(context,
+                                                               volume['id'])
+        except exception.NotFound:
+            LOG.info(_("Skipping ensure_export.
No iscsi_target " + "provisioned for volume: %d"), volume['id']) + return + + iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) + volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) + + self.tgtadm.new_target(iscsi_name, iscsi_target, check_exit_code=False) + self.tgtadm.new_logicalunit(iscsi_target, 0, volume_path, + check_exit_code=False) + + def _ensure_iscsi_targets(self, context, host): + """Ensure that target ids have been created in datastore.""" + host_iscsi_targets = self.db.iscsi_target_count_by_host(context, host) + if host_iscsi_targets >= FLAGS.iscsi_num_targets: + return + # NOTE(vish): Target ids start at 1, not 0. + for target_num in xrange(1, FLAGS.iscsi_num_targets + 1): + target = {'host': host, 'target_num': target_num} + self.db.iscsi_target_create_safe(context, target) + + def create_export(self, context, volume): + """Creates an export for a logical volume.""" + self._ensure_iscsi_targets(context, volume['host']) + iscsi_target = self.db.volume_allocate_iscsi_target(context, + volume['id'], + volume['host']) + iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) + volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name']) + + self.tgtadm.new_target(iscsi_name, iscsi_target) + self.tgtadm.new_logicalunit(iscsi_target, 0, volume_path) + + model_update = {} + if FLAGS.iscsi_helper == 'tgtadm': + lun = 1 + else: + lun = 0 + model_update['provider_location'] = _iscsi_location( + FLAGS.iscsi_ip_address, iscsi_target, iscsi_name, lun) + return model_update + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping remove_export. No iscsi_target " + "provisioned for volume: %d"), volume['id']) + return + + try: + # ietadm show will exit with an error + # this export has already been removed + self.tgtadm.show_target(iscsi_target) + except Exception as e: + LOG.info(_("Skipping remove_export. No iscsi_target " + "is presently exported for volume: %d"), volume['id']) + return + + self.tgtadm.delete_logicalunit(iscsi_target, 0) + self.tgtadm.delete_target(iscsi_target) + + def _do_iscsi_discovery(self, volume): + #TODO(justinsb): Deprecate discovery and use stored info + #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) + LOG.warn(_("ISCSI provider_location not stored, using discovery")) + + volume_name = volume['name'] + + (out, _err) = self._execute('iscsiadm', '-m', 'discovery', + '-t', 'sendtargets', '-p', volume['host'], + run_as_root=True) + for target in out.splitlines(): + if FLAGS.iscsi_ip_address in target and volume_name in target: + return target + return None + + def _get_iscsi_properties(self, volume): + """Gets iscsi configuration + + We ideally get saved information in the volume entity, but fall back + to discovery if need be. Discovery may be completely removed in future + The properties are: + + :target_discovered: boolean indicating whether discovery was used + + :target_iqn: the IQN of the iSCSI target + + :target_portal: the portal of the iSCSI target + + :target_lun: the lun of the iSCSI target + + :volume_id: the id of the volume (currently used by xen) + + :auth_method:, :auth_username:, :auth_password: + + the authentication details. Right now, either auth_method is not + present meaning no authentication, or auth_method == `CHAP` + meaning use CHAP with the specified credentials. 
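+
+        An illustrative result for a discovered target (values are
+        hypothetical)::
+
+            {'target_discovered': True,
+             'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
+             'target_portal': '192.0.2.10:3260',
+             'target_lun': 1,
+             'volume_id': 1}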
+ """ + + properties = {} + + location = volume['provider_location'] + + if location: + # provider_location is the same format as iSCSI discovery output + properties['target_discovered'] = False + else: + location = self._do_iscsi_discovery(volume) + + if not location: + raise exception.Error(_("Could not find iSCSI export " + " for volume %s") % + (volume['name'])) + + LOG.debug(_("ISCSI Discovery: Found %s") % (location)) + properties['target_discovered'] = True + + results = location.split(" ") + properties['target_portal'] = results[0].split(",")[0] + properties['target_iqn'] = results[1] + try: + properties['target_lun'] = int(results[2]) + except (IndexError, ValueError): + if FLAGS.iscsi_helper == 'tgtadm': + properties['target_lun'] = 1 + else: + properties['target_lun'] = 0 + + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return properties + + def _run_iscsiadm(self, iscsi_properties, iscsi_command): + (out, err) = self._execute('iscsiadm', '-m', 'node', '-T', + iscsi_properties['target_iqn'], + '-p', iscsi_properties['target_portal'], + *iscsi_command, run_as_root=True) + LOG.debug("iscsiadm %s: stdout=%s stderr=%s" % + (iscsi_command, out, err)) + return (out, err) + + def _iscsiadm_update(self, iscsi_properties, property_key, property_value): + iscsi_command = ('--op', 'update', '-n', property_key, + '-v', property_value) + return self._run_iscsiadm(iscsi_properties, iscsi_command) + + def initialize_connection(self, volume, connector): + """Initializes the connection and returns connection info. + + The iscsi driver returns a driver_volume_type of 'iscsi'. + The format of the driver data is defined in _get_iscsi_properties. + Example return value:: + + { + 'driver_volume_type': 'iscsi' + 'data': { + 'target_discovered': True, + 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', + 'target_portal': '127.0.0.0.1:3260', + 'volume_id': 1, + } + } + + """ + + iscsi_properties = self._get_iscsi_properties(volume) + return { + 'driver_volume_type': 'iscsi', + 'data': iscsi_properties + } + + def terminate_connection(self, volume, connector): + pass + + def check_for_export(self, context, volume_id): + """Make sure volume is exported.""" + + tid = self.db.volume_get_iscsi_target_num(context, volume_id) + try: + self.tgtadm.show_target(tid) + except exception.ProcessExecutionError, e: + # Instances remount read-only in this case. + # /etc/init.d/iscsitarget restart and rebooting cinder-volume + # is better since ensure_export() works at boot time. 
+ LOG.error(_("Cannot confirm exported volume " + "id:%(volume_id)s.") % locals()) + raise + + +class FakeISCSIDriver(ISCSIDriver): + """Logs calls instead of executing.""" + def __init__(self, *args, **kwargs): + super(FakeISCSIDriver, self).__init__(execute=self.fake_execute, + *args, **kwargs) + + def check_for_setup_error(self): + """No setup necessary in fake mode.""" + pass + + def initialize_connection(self, volume, connector): + return { + 'driver_volume_type': 'iscsi', + 'data': {} + } + + def terminate_connection(self, volume, connector): + pass + + @staticmethod + def fake_execute(cmd, *_args, **_kwargs): + """Execute that simply logs the command.""" + LOG.debug(_("FAKE ISCSI: %s"), cmd) + return (None, None) + + +class RBDDriver(VolumeDriver): + """Implements RADOS block device (RBD) volume commands""" + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + (stdout, stderr) = self._execute('rados', 'lspools') + pools = stdout.split("\n") + if not FLAGS.rbd_pool in pools: + raise exception.Error(_("rbd has no pool %s") % + FLAGS.rbd_pool) + + def create_volume(self, volume): + """Creates a logical volume.""" + if int(volume['size']) == 0: + size = 100 + else: + size = int(volume['size']) * 1024 + self._try_execute('rbd', '--pool', FLAGS.rbd_pool, + '--size', size, 'create', volume['name']) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + self._try_execute('rbd', '--pool', FLAGS.rbd_pool, + 'rm', volume['name']) + + def create_snapshot(self, snapshot): + """Creates an rbd snapshot""" + self._try_execute('rbd', '--pool', FLAGS.rbd_pool, + 'snap', 'create', '--snap', snapshot['name'], + snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes an rbd snapshot""" + self._try_execute('rbd', '--pool', FLAGS.rbd_pool, + 'snap', 'rm', '--snap', snapshot['name'], + snapshot['volume_name']) + + def local_path(self, volume): + """Returns the path of the rbd volume.""" + # This is the same as the remote path + # since qemu accesses it directly. + return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name']) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume""" + pass + + def initialize_connection(self, volume, connector): + return { + 'driver_volume_type': 'rbd', + 'data': { + 'name': '%s/%s' % (FLAGS.rbd_pool, volume['name']), + 'auth_enabled': FLAGS.rbd_secret_uuid is not None, + 'auth_username': FLAGS.rbd_user, + 'secret_type': 'ceph', + 'secret_uuid': FLAGS.rbd_secret_uuid, + } + } + + def terminate_connection(self, volume, connector): + pass + + +class SheepdogDriver(VolumeDriver): + """Executes commands relating to Sheepdog Volumes""" + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + try: + #NOTE(francois-charlier) Since 0.24 'collie cluster info -r' + # gives short output, but for compatibility reason we won't + # use it and just check if 'running' is in the output. 
+ (out, err) = self._execute('collie', 'cluster', 'info') + if not 'running' in out.split(): + raise exception.Error(_("Sheepdog is not working: %s") % out) + except exception.ProcessExecutionError: + raise exception.Error(_("Sheepdog is not working")) + + def create_volume(self, volume): + """Creates a sheepdog volume""" + self._try_execute('qemu-img', 'create', + "sheepdog:%s" % volume['name'], + self._sizestr(volume['size'])) + + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a sheepdog volume from a snapshot.""" + self._try_execute('qemu-img', 'create', '-b', + "sheepdog:%s:%s" % (snapshot['volume_name'], + snapshot['name']), + "sheepdog:%s" % volume['name']) + + def delete_volume(self, volume): + """Deletes a logical volume""" + self._try_execute('collie', 'vdi', 'delete', volume['name']) + + def create_snapshot(self, snapshot): + """Creates a sheepdog snapshot""" + self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'], + "sheepdog:%s" % snapshot['volume_name']) + + def delete_snapshot(self, snapshot): + """Deletes a sheepdog snapshot""" + self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'], + '-s', snapshot['name']) + + def local_path(self, volume): + return "sheepdog:%s" % volume['name'] + + def ensure_export(self, context, volume): + """Safely and synchronously recreates an export for a logical volume""" + pass + + def create_export(self, context, volume): + """Exports the volume""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume""" + pass + + def initialize_connection(self, volume, connector): + return { + 'driver_volume_type': 'sheepdog', + 'data': { + 'name': volume['name'] + } + } + + def terminate_connection(self, volume, connector): + pass + + +class LoggingVolumeDriver(VolumeDriver): + """Logs and records calls, for unit tests.""" + + def check_for_setup_error(self): + pass + + def create_volume(self, volume): + self.log_action('create_volume', volume) + + def delete_volume(self, volume): + self.log_action('delete_volume', volume) + + def local_path(self, volume): + print "local_path not implemented" + raise NotImplementedError() + + def ensure_export(self, context, volume): + self.log_action('ensure_export', volume) + + def create_export(self, context, volume): + self.log_action('create_export', volume) + + def remove_export(self, context, volume): + self.log_action('remove_export', volume) + + def initialize_connection(self, volume, connector): + self.log_action('initialize_connection', volume) + + def terminate_connection(self, volume, connector): + self.log_action('terminate_connection', volume) + + def check_for_export(self, context, volume_id): + self.log_action('check_for_export', volume_id) + + _LOGS = [] + + @staticmethod + def clear_logs(): + LoggingVolumeDriver._LOGS = [] + + @staticmethod + def log_action(action, parameters): + """Logs the command.""" + LOG.debug(_("LoggingVolumeDriver: %s") % (action)) + log_dictionary = {} + if parameters: + log_dictionary = dict(parameters) + log_dictionary['action'] = action + LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary)) + LoggingVolumeDriver._LOGS.append(log_dictionary) + + @staticmethod + def all_logs(): + return LoggingVolumeDriver._LOGS + + @staticmethod + def logs_like(action, **kwargs): + matches = [] + for entry in LoggingVolumeDriver._LOGS: + if entry['action'] != action: + continue + match = True + for k, v in kwargs.iteritems(): + if entry.get(k) != v: + match = False + break + if match: + 
matches.append(entry) + return matches + + +def _iscsi_location(ip, target, iqn, lun=None): + return "%s:%s,%s %s %s" % (ip, FLAGS.iscsi_port, target, iqn, lun) diff --git a/cinder/volume/iscsi.py b/cinder/volume/iscsi.py new file mode 100644 index 00000000000..f7920597138 --- /dev/null +++ b/cinder/volume/iscsi.py @@ -0,0 +1,160 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Helper code for the iSCSI volume driver. + +""" + +from cinder import flags +from cinder.openstack.common import cfg +from cinder import utils + + +iscsi_helper_opt = cfg.StrOpt('iscsi_helper', + default='ietadm', + help='iscsi target user-land tool to use') + +FLAGS = flags.FLAGS +FLAGS.register_opt(iscsi_helper_opt) + + +class TargetAdmin(object): + """iSCSI target administration. + + Base class for iSCSI target admin helpers. + """ + + def __init__(self, cmd, execute): + self._cmd = cmd + self.set_execute(execute) + + def set_execute(self, execute): + """Set the function to be used to execute commands.""" + self._execute = execute + + def _run(self, *args, **kwargs): + self._execute(self._cmd, *args, run_as_root=True, **kwargs) + + def new_target(self, name, tid, **kwargs): + """Create a new iSCSI target.""" + raise NotImplementedError() + + def delete_target(self, tid, **kwargs): + """Delete a target.""" + raise NotImplementedError() + + def show_target(self, tid, **kwargs): + """Query the given target ID.""" + raise NotImplementedError() + + def new_logicalunit(self, tid, lun, path, **kwargs): + """Create a new LUN on a target using the supplied path.""" + raise NotImplementedError() + + def delete_logicalunit(self, tid, lun, **kwargs): + """Delete a logical unit from a target.""" + raise NotImplementedError() + + +class TgtAdm(TargetAdmin): + """iSCSI target administration using tgtadm.""" + + def __init__(self, execute=utils.execute): + super(TgtAdm, self).__init__('tgtadm', execute) + + def new_target(self, name, tid, **kwargs): + self._run('--op', 'new', + '--lld=iscsi', '--mode=target', + '--tid=%s' % tid, + '--targetname=%s' % name, + **kwargs) + self._run('--op', 'bind', + '--lld=iscsi', '--mode=target', + '--initiator-address=ALL', + '--tid=%s' % tid, + **kwargs) + + def delete_target(self, tid, **kwargs): + self._run('--op', 'delete', + '--lld=iscsi', '--mode=target', + '--tid=%s' % tid, + **kwargs) + + def show_target(self, tid, **kwargs): + self._run('--op', 'show', + '--lld=iscsi', '--mode=target', + '--tid=%s' % tid, + **kwargs) + + def new_logicalunit(self, tid, lun, path, **kwargs): + self._run('--op', 'new', + '--lld=iscsi', '--mode=logicalunit', + '--tid=%s' % tid, + '--lun=%d' % (lun + 1), # lun0 is reserved + '--backing-store=%s' % path, + **kwargs) + + def delete_logicalunit(self, tid, lun, **kwargs): + self._run('--op', 'delete', + '--lld=iscsi', '--mode=logicalunit', + '--tid=%s' % 
tid,
+                  '--lun=%d' % (lun + 1),
+                  **kwargs)
+
+
+class IetAdm(TargetAdmin):
+    """iSCSI target administration using ietadm."""
+
+    def __init__(self, execute=utils.execute):
+        super(IetAdm, self).__init__('ietadm', execute)
+
+    def new_target(self, name, tid, **kwargs):
+        self._run('--op', 'new',
+                  '--tid=%s' % tid,
+                  '--params', 'Name=%s' % name,
+                  **kwargs)
+
+    def delete_target(self, tid, **kwargs):
+        self._run('--op', 'delete',
+                  '--tid=%s' % tid,
+                  **kwargs)
+
+    def show_target(self, tid, **kwargs):
+        self._run('--op', 'show',
+                  '--tid=%s' % tid,
+                  **kwargs)
+
+    def new_logicalunit(self, tid, lun, path, **kwargs):
+        self._run('--op', 'new',
+                  '--tid=%s' % tid,
+                  '--lun=%d' % lun,
+                  '--params', 'Path=%s,Type=fileio' % path,
+                  **kwargs)
+
+    def delete_logicalunit(self, tid, lun, **kwargs):
+        self._run('--op', 'delete',
+                  '--tid=%s' % tid,
+                  '--lun=%d' % lun,
+                  **kwargs)
+
+
+def get_target_admin():
+    if FLAGS.iscsi_helper == 'tgtadm':
+        return TgtAdm()
+    else:
+        return IetAdm()
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
new file mode 100644
index 00000000000..efb2eabf464
--- /dev/null
+++ b/cinder/volume/manager.py
@@ -0,0 +1,331 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume manager manages creating, attaching, detaching, and persistent storage.
+
+Persistent storage volumes keep their state independent of instances. You can
+attach to an instance, terminate the instance, spawn a new instance (even
+one from a different image) and re-attach the volume with the same data
+intact.
+
+**Related Flags**
+
+:volume_topic: What :mod:`rpc` topic to listen to (default: `volume`).
+:volume_manager: The module name of a class derived from
+                 :class:`manager.Manager` (default:
+                 :class:`cinder.volume.manager.Manager`).
+:storage_availability_zone: Defaults to `cinder`.
+:volume_driver: Used by :class:`Manager`. Defaults to
+                :class:`cinder.volume.driver.ISCSIDriver`.
+:volume_group: Name of the group that will contain exported volumes (default: + `cinder-volumes`) +:num_shell_tries: Number of times to attempt to run commands (default: 3) + +""" + +from cinder import context +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import manager +from cinder.openstack.common import cfg +from cinder.openstack.common import importutils +from cinder import rpc +from cinder import utils +from cinder.volume import volume_types + + +LOG = logging.getLogger(__name__) + +volume_manager_opts = [ + cfg.StrOpt('storage_availability_zone', + default='cinder', + help='availability zone of this service'), + cfg.StrOpt('volume_driver', + default='cinder.volume.driver.ISCSIDriver', + help='Driver to use for volume creation'), + cfg.BoolOpt('use_local_volumes', + default=True, + help='if True, will not discover local volumes'), + cfg.BoolOpt('volume_force_update_capabilities', + default=False, + help='if True will force update capabilities on each check'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(volume_manager_opts) + + +class VolumeManager(manager.SchedulerDependentManager): + """Manages attachable block storage devices.""" + def __init__(self, volume_driver=None, *args, **kwargs): + """Load the driver from the one specified in args, or from flags.""" + if not volume_driver: + volume_driver = FLAGS.volume_driver + self.driver = importutils.import_object(volume_driver) + super(VolumeManager, self).__init__(service_name='volume', + *args, **kwargs) + # NOTE(vish): Implementation specific db handling is done + # by the driver. + self.driver.db = self.db + self._last_volume_stats = [] + + def init_host(self): + """Do any initialization that needs to be run if this is a + standalone service.""" + + ctxt = context.get_admin_context() + self.driver.do_setup(ctxt) + self.driver.check_for_setup_error() + + volumes = self.db.volume_get_all_by_host(ctxt, self.host) + LOG.debug(_("Re-exporting %s volumes"), len(volumes)) + for volume in volumes: + if volume['status'] in ['available', 'in-use']: + self.driver.ensure_export(ctxt, volume) + else: + LOG.info(_("volume %s: skipping export"), volume['name']) + + def create_volume(self, context, volume_id, snapshot_id=None): + """Creates and exports the volume.""" + context = context.elevated() + volume_ref = self.db.volume_get(context, volume_id) + LOG.info(_("volume %s: creating"), volume_ref['name']) + + self.db.volume_update(context, + volume_id, + {'host': self.host}) + # NOTE(vish): so we don't have to get volume from db again + # before passing it to the driver. 
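+        # Illustrative aside (keys are driver-specific; this sample is an
+        # assumption, not part of the original patch): a model update
+        # returned by create_volume()/create_export() below might look like
+        #
+        #     {'provider_location': '10.0.0.1:3260,1 iqn.2010-10...:vol1 0'}
+        #
+        # and is persisted back to the volume row via volume_update().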
+ volume_ref['host'] = self.host + + try: + vol_name = volume_ref['name'] + vol_size = volume_ref['size'] + LOG.debug(_("volume %(vol_name)s: creating lv of" + " size %(vol_size)sG") % locals()) + if snapshot_id is None: + model_update = self.driver.create_volume(volume_ref) + else: + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + model_update = self.driver.create_volume_from_snapshot( + volume_ref, + snapshot_ref) + if model_update: + self.db.volume_update(context, volume_ref['id'], model_update) + + LOG.debug(_("volume %s: creating export"), volume_ref['name']) + model_update = self.driver.create_export(context, volume_ref) + if model_update: + self.db.volume_update(context, volume_ref['id'], model_update) + except Exception: + with utils.save_and_reraise_exception(): + self.db.volume_update(context, + volume_ref['id'], {'status': 'error'}) + + now = utils.utcnow() + self.db.volume_update(context, + volume_ref['id'], {'status': 'available', + 'launched_at': now}) + LOG.debug(_("volume %s: created successfully"), volume_ref['name']) + self._reset_stats() + return volume_id + + def delete_volume(self, context, volume_id): + """Deletes and unexports volume.""" + context = context.elevated() + volume_ref = self.db.volume_get(context, volume_id) + if volume_ref['attach_status'] == "attached": + raise exception.Error(_("Volume is still attached")) + if volume_ref['host'] != self.host: + raise exception.Error(_("Volume is not local to this node")) + + self._reset_stats() + try: + LOG.debug(_("volume %s: removing export"), volume_ref['name']) + self.driver.remove_export(context, volume_ref) + LOG.debug(_("volume %s: deleting"), volume_ref['name']) + self.driver.delete_volume(volume_ref) + except exception.VolumeIsBusy, e: + LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) + self.driver.ensure_export(context, volume_ref) + self.db.volume_update(context, volume_ref['id'], + {'status': 'available'}) + return True + except Exception: + with utils.save_and_reraise_exception(): + self.db.volume_update(context, + volume_ref['id'], + {'status': 'error_deleting'}) + + self.db.volume_destroy(context, volume_id) + LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) + return True + + def create_snapshot(self, context, volume_id, snapshot_id): + """Creates and exports the snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + LOG.info(_("snapshot %s: creating"), snapshot_ref['name']) + + try: + snap_name = snapshot_ref['name'] + LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + model_update = self.driver.create_snapshot(snapshot_ref) + if model_update: + self.db.snapshot_update(context, snapshot_ref['id'], + model_update) + + except Exception: + with utils.save_and_reraise_exception(): + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error'}) + + self.db.snapshot_update(context, + snapshot_ref['id'], {'status': 'available', + 'progress': '100%'}) + LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name']) + return snapshot_id + + def delete_snapshot(self, context, snapshot_id): + """Deletes and unexports snapshot.""" + context = context.elevated() + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + + try: + LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) + self.driver.delete_snapshot(snapshot_ref) + except exception.SnapshotIsBusy: + LOG.debug(_("snapshot %s: snapshot is busy"), snapshot_ref['name']) + self.db.snapshot_update(context, + 
snapshot_ref['id'], + {'status': 'available'}) + return True + except Exception: + with utils.save_and_reraise_exception(): + self.db.snapshot_update(context, + snapshot_ref['id'], + {'status': 'error_deleting'}) + + self.db.snapshot_destroy(context, snapshot_id) + LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name']) + return True + + def attach_volume(self, context, volume_id, instance_id, mountpoint): + """Updates db to show volume is attached""" + # TODO(vish): refactor this into a more general "reserve" + self.db.volume_attached(context, + volume_id, + instance_id, + mountpoint) + + def detach_volume(self, context, volume_id): + """Updates db to show volume is detached""" + # TODO(vish): refactor this into a more general "unreserve" + self.db.volume_detached(context, volume_id) + + def initialize_connection(self, context, volume_id, connector): + """Prepare volume for connection from host represented by connector. + + This method calls the driver initialize_connection and returns + it to the caller. The connector parameter is a dictionary with + information about the host that will connect to the volume in the + following format:: + + { + 'ip': ip, + 'initiator': initiator, + } + + ip: the ip address of the connecting machine + + initiator: the iscsi initiator name of the connecting machine. + This can be None if the connecting machine does not support iscsi + connections. + + driver is responsible for doing any necessary security setup and + returning a connection_info dictionary in the following format:: + + { + 'driver_volume_type': driver_volume_type, + 'data': data, + } + + driver_volume_type: a string to identify the type of volume. This + can be used by the calling code to determine the + strategy for connecting to the volume. This could + be 'iscsi', 'rbd', 'sheepdog', etc. + + data: this is the data that the calling code will use to connect + to the volume. Keep in mind that this will be serialized to + json in various places, so it should not contain any non-json + data types. + """ + volume_ref = self.db.volume_get(context, volume_id) + return self.driver.initialize_connection(volume_ref, connector) + + def terminate_connection(self, context, volume_id, connector): + """Cleanup connection from host represented by connector. + + The format of connector is the same as for initialize_connection. + """ + volume_ref = self.db.volume_get(context, volume_id) + self.driver.terminate_connection(volume_ref, connector) + + def check_for_export(self, context, instance_id): + """Make sure whether volume is exported.""" + volumes = self.db.volume_get_all_by_instance(context, instance_id) + for volume in volumes: + self.driver.check_for_export(context, volume['id']) + + def _volume_stats_changed(self, stat1, stat2): + if FLAGS.volume_force_update_capabilities: + return True + if len(stat1) != len(stat2): + return True + for (k, v) in stat1.iteritems(): + if (k, v) not in stat2.iteritems(): + return True + return False + + @manager.periodic_task + def _report_driver_status(self, context): + volume_stats = self.driver.get_volume_stats(refresh=True) + if volume_stats: + LOG.info(_("Checking volume capabilities")) + + if self._volume_stats_changed(self._last_volume_stats, + volume_stats): + LOG.info(_("New capabilities found: %s"), volume_stats) + self._last_volume_stats = volume_stats + + # This will grab info about the host and queue it + # to be sent to the Schedulers. 
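+                # Illustrative only (the real keys depend entirely on the
+                # driver's get_volume_stats()): a stats dict reported here
+                # might look like
+                #     {'driver_version': '1.0', 'free_capacity_gb': 100}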
+                self.update_service_capabilities(self._last_volume_stats)
+            else:
+                # avoid repeating fanouts
+                self.update_service_capabilities(None)
+
+    def _reset_stats(self):
+        LOG.info(_("Clear capabilities"))
+        self._last_volume_stats = []
+
+    def notification(self, context, event):
+        LOG.info(_("Notification {%s} received"), event)
+        self._reset_stats()
diff --git a/cinder/volume/netapp.py b/cinder/volume/netapp.py
new file mode 100644
index 00000000000..17f2f24f2b4
--- /dev/null
+++ b/cinder/volume/netapp.py
@@ -0,0 +1,676 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NetApp, Inc.
+# Copyright (c) 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Volume driver for NetApp storage systems.
+
+This driver requires NetApp OnCommand 5.0 and one or more Data
+ONTAP 7-mode storage systems with installed iSCSI licenses.
+
+"""
+
+import string
+import time
+
+import suds
+from suds import client
+from suds.sax import text
+
+from cinder import exception
+from cinder import flags
+from cinder import log as logging
+from cinder.openstack.common import cfg
+from cinder.volume import driver
+
+LOG = logging.getLogger("cinder.volume.driver")
+
+netapp_opts = [
+    cfg.StrOpt('netapp_wsdl_url',
+               default=None,
+               help='URL of the WSDL file for the DFM server'),
+    cfg.StrOpt('netapp_login',
+               default=None,
+               help='User name for the DFM server'),
+    cfg.StrOpt('netapp_password',
+               default=None,
+               help='Password for the DFM server'),
+    cfg.StrOpt('netapp_server_hostname',
+               default=None,
+               help='Hostname for the DFM server'),
+    cfg.IntOpt('netapp_server_port',
+               default=8088,
+               help='Port number for the DFM server'),
+    cfg.StrOpt('netapp_storage_service',
+               default=None,
+               help='Storage service to use for provisioning'),
+    cfg.StrOpt('netapp_vfiler',
+               default=None,
+               help='Vfiler to use for provisioning'),
+    ]
+
+FLAGS = flags.FLAGS
+FLAGS.register_opts(netapp_opts)
+
+
+class NetAppISCSIDriver(driver.ISCSIDriver):
+    """NetApp iSCSI volume driver."""
+
+    def __init__(self, *args, **kwargs):
+        super(NetAppISCSIDriver, self).__init__(*args, **kwargs)
+
+    def _check_fail(self, request, response):
+        if 'failed' == response.Status:
+            name = request.Name
+            reason = response.Reason
+            msg = _('API %(name)s failed: %(reason)s')
+            raise exception.Error(msg % locals())
+
+    def _create_client(self, wsdl_url, login, password, hostname, port):
+        """
+        Instantiate a "suds" client to make web services calls to the
+        DFM server. Note that the WSDL file is quite large and may take
+        a few seconds to parse.
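+
+        Illustrative call (every value below is a placeholder, not a real
+        endpoint)::
+
+            self._create_client('http://dfm.example.com:8088/dfm.wsdl',
+                                'admin', 'secret', 'dfm.example.com', 8088)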
+ """ + self.client = client.Client(wsdl_url, + username=login, + password=password) + soap_url = 'http://%s:%s/apis/soap/v1' % (hostname, port) + self.client.set_options(location=soap_url) + + def _set_storage_service(self, storage_service): + """Set the storage service to use for provisioning""" + self.storage_service = storage_service + + def _set_vfiler(self, vfiler): + """Set the vfiler to use for provisioning""" + self.vfiler = vfiler + + def _check_flags(self): + """Ensure that the flags we care about are set.""" + required_flags = ['netapp_wsdl_url', 'netapp_login', 'netapp_password', + 'netapp_server_hostname', 'netapp_server_port', + 'netapp_storage_service'] + for flag in required_flags: + if not getattr(FLAGS, flag, None): + raise exception.Error(_('%s is not set') % flag) + + def do_setup(self, context): + """ + Called one time by the manager after the driver is loaded. + Validate the flags we care about and setup the suds (web services) + client. + """ + self._check_flags() + self._create_client(FLAGS.netapp_wsdl_url, FLAGS.netapp_login, + FLAGS.netapp_password, FLAGS.netapp_server_hostname, + FLAGS.netapp_server_port) + self._set_storage_service(FLAGS.netapp_storage_service) + if FLAGS.netapp_vfiler: + self._set_vfiler(FLAGS.netapp_vfiler) + + def check_for_setup_error(self): + """Invoke a web services API to make sure we can talk to the server.""" + res = self.client.service.DfmAbout() + LOG.debug(_("Connected to DFM server")) + + def _get_job_progress(self, job_id): + """ + Obtain the latest progress report for the job and return the + list of progress events. + """ + server = self.client.service + res = server.DpJobProgressEventListIterStart(JobId=job_id) + tag = res.Tag + event_list = [] + try: + while True: + res = server.DpJobProgressEventListIterNext(Tag=tag, + Maximum=100) + if not hasattr(res, 'ProgressEvents'): + break + event_list += res.ProgressEvents.DpJobProgressEventInfo + finally: + server.DpJobProgressEventListIterEnd(Tag=tag) + return event_list + + def _wait_for_job(self, job_id): + """ + Poll the job until it completes or an error is detected. Return the + final list of progress events if it completes successfully. + """ + while True: + events = self._get_job_progress(job_id) + for event in events: + if event.EventStatus == 'error': + raise exception.Error(_('Job failed: %s') % + (event.ErrorMessage)) + if event.EventType == 'job-end': + return events + time.sleep(5) + + def _dataset_name(self, project): + """Return the dataset name for a given project """ + _project = string.replace(string.replace(project, ' ', '_'), '-', '_') + return 'OpenStack_' + _project + + def _does_dataset_exist(self, dataset_name): + """Check if a dataset already exists""" + server = self.client.service + try: + res = server.DatasetListInfoIterStart(ObjectNameOrId=dataset_name) + tag = res.Tag + except suds.WebFault: + return False + try: + res = server.DatasetListInfoIterNext(Tag=tag, Maximum=1) + if hasattr(res, 'Datasets') and res.Datasets.DatasetInfo: + return True + finally: + server.DatasetListInfoIterEnd(Tag=tag) + return False + + def _create_dataset(self, dataset_name): + """ + Create a new dataset using the storage service. The export settings are + set to create iSCSI LUNs aligned for Linux. 
+ """ + server = self.client.service + + lunmap = self.client.factory.create('DatasetLunMappingInfo') + lunmap.IgroupOsType = 'linux' + export = self.client.factory.create('DatasetExportInfo') + export.DatasetExportProtocol = 'iscsi' + export.DatasetLunMappingInfo = lunmap + detail = self.client.factory.create('StorageSetInfo') + detail.DpNodeName = 'Primary data' + detail.DatasetExportInfo = export + if hasattr(self, 'vfiler'): + detail.ServerNameOrId = self.vfiler + details = self.client.factory.create('ArrayOfStorageSetInfo') + details.StorageSetInfo = [detail] + + server.StorageServiceDatasetProvision( + StorageServiceNameOrId=self.storage_service, + DatasetName=dataset_name, + AssumeConfirmation=True, + StorageSetDetails=details) + + def _provision(self, name, description, project, size): + """ + Provision a LUN through provisioning manager. The LUN will be created + inside a dataset associated with the project. If the dataset doesn't + already exist, we create it using the storage service specified in the + cinder conf. + """ + + dataset_name = self._dataset_name(project) + if not self._does_dataset_exist(dataset_name): + self._create_dataset(dataset_name) + + info = self.client.factory.create('ProvisionMemberRequestInfo') + info.Name = name + if description: + info.Description = description + info.Size = size + info.MaximumSnapshotSpace = 2 * long(size) + + server = self.client.service + lock_id = server.DatasetEditBegin(DatasetNameOrId=dataset_name) + try: + server.DatasetProvisionMember(EditLockId=lock_id, + ProvisionMemberRequestInfo=info) + res = server.DatasetEditCommit(EditLockId=lock_id, + AssumeConfirmation=True) + except (suds.WebFault, Exception): + server.DatasetEditRollback(EditLockId=lock_id) + raise exception.Error(_('Failed to provision dataset member')) + + lun_id = None + + for info in res.JobIds.JobInfo: + events = self._wait_for_job(info.JobId) + for event in events: + if event.EventType != 'lun-create': + continue + lun_id = event.ProgressLunInfo.LunPathId + + if not lun_id: + raise exception.Error(_('No LUN was created by the provision job')) + + def _remove_destroy(self, name, project): + """ + Remove the LUN from the dataset and destroy the actual LUN on the + storage system. 
+ """ + lun_id = self._get_lun_id(name, project) + if not lun_id: + raise exception.Error(_("Failed to find LUN ID for volume %s") % + (name)) + + member = self.client.factory.create('DatasetMemberParameter') + member.ObjectNameOrId = lun_id + members = self.client.factory.create('ArrayOfDatasetMemberParameter') + members.DatasetMemberParameter = [member] + + dataset_name = self._dataset_name(project) + + server = self.client.service + lock_id = server.DatasetEditBegin(DatasetNameOrId=dataset_name) + try: + server.DatasetRemoveMember(EditLockId=lock_id, Destroy=True, + DatasetMemberParameters=members) + server.DatasetEditCommit(EditLockId=lock_id, + AssumeConfirmation=True) + except (suds.WebFault, Exception): + server.DatasetEditRollback(EditLockId=lock_id) + msg = _('Failed to remove and delete dataset member') + raise exception.Error(msg) + + def create_volume(self, volume): + """Driver entry point for creating a new volume""" + default_size = '104857600' # 100 MB + gigabytes = 1073741824L # 2^30 + name = volume['name'] + project = volume['project_id'] + display_name = volume['display_name'] + display_description = volume['display_description'] + if display_name: + if display_description: + description = display_name + "\n" + display_description + else: + description = display_name + elif display_description: + description = display_description + if int(volume['size']) == 0: + size = default_size + else: + size = str(int(volume['size']) * gigabytes) + self._provision(name, description, project, size) + + def delete_volume(self, volume): + """Driver entry point for destroying existing volumes""" + name = volume['name'] + project = volume['project_id'] + self._remove_destroy(name, project) + + def _get_lun_id(self, name, project): + """ + Given the name of a volume, find the DFM (OnCommand) ID of the LUN + corresponding to that volume. Currently we do this by enumerating + all of the LUNs in the dataset and matching the names against the + OpenStack volume name. + + This could become a performance bottleneck in very large installations + in which case possible options for mitigating the problem are: + 1) Store the LUN ID alongside the volume in the cinder DB (if possible) + 2) Cache the list of LUNs in the dataset in driver memory + 3) Store the volume to LUN ID mappings in a local file + """ + dataset_name = self._dataset_name(project) + + server = self.client.service + res = server.DatasetMemberListInfoIterStart( + DatasetNameOrId=dataset_name, + IncludeExportsInfo=True, + IncludeIndirect=True, + MemberType='lun_path') + tag = res.Tag + suffix = '/' + name + try: + while True: + res = server.DatasetMemberListInfoIterNext(Tag=tag, + Maximum=100) + if (not hasattr(res, 'DatasetMembers') or + not res.DatasetMembers): + break + for member in res.DatasetMembers.DatasetMemberInfo: + if member.MemberName.endswith(suffix): + return member.MemberId + finally: + server.DatasetMemberListInfoIterEnd(Tag=tag) + + def _get_lun_details(self, lun_id): + """Given the ID of a LUN, get the details about that LUN""" + server = self.client.service + res = server.LunListInfoIterStart(ObjectNameOrId=lun_id) + tag = res.Tag + try: + res = server.LunListInfoIterNext(Tag=tag, Maximum=1) + if hasattr(res, 'Luns') and res.Luns.LunInfo: + return res.Luns.LunInfo[0] + finally: + server.LunListInfoIterEnd(Tag=tag) + + def _get_host_details(self, host_id): + """ + Given the ID of a host (storage system), get the details about that + host. 
+ """ + server = self.client.service + res = server.HostListInfoIterStart(ObjectNameOrId=host_id) + tag = res.Tag + try: + res = server.HostListInfoIterNext(Tag=tag, Maximum=1) + if hasattr(res, 'Hosts') and res.Hosts.HostInfo: + return res.Hosts.HostInfo[0] + finally: + server.HostListInfoIterEnd(Tag=tag) + + def _get_iqn_for_host(self, host_id): + """Get the iSCSI Target Name for a storage system""" + request = self.client.factory.create('Request') + request.Name = 'iscsi-node-get-name' + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + return response.Results['node-name'][0] + + def _api_elem_is_empty(self, elem): + """ + Helper routine to figure out if a list returned from a proxy API + is empty. This is necessary because the API proxy produces nasty + looking XML. + """ + if not type(elem) is list: + return True + if 0 == len(elem): + return True + child = elem[0] + if isinstance(child, text.Text): + return True + if type(child) is str: + return True + return False + + def _get_target_portal_for_host(self, host_id, host_address): + """ + Get the iSCSI Target Portal details for a particular IP address + on a storage system. + """ + request = self.client.factory.create('Request') + request.Name = 'iscsi-portal-list-info' + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + portal = {} + portals = response.Results['iscsi-portal-list-entries'] + if self._api_elem_is_empty(portals): + return portal + portal_infos = portals[0]['iscsi-portal-list-entry-info'] + for portal_info in portal_infos: + portal['address'] = portal_info['ip-address'][0] + portal['port'] = portal_info['ip-port'][0] + portal['portal'] = portal_info['tpgroup-tag'][0] + if host_address == portal['address']: + break + return portal + + def _get_export(self, volume): + """ + Looks up the LUN in DFM based on the volume and project name, then get + the LUN's ID. We store that value in the database instead of the iSCSI + details because we will not have the true iSCSI details until masking + time (when initialize_connection() is called). + """ + name = volume['name'] + project = volume['project_id'] + lun_id = self._get_lun_id(name, project) + if not lun_id: + msg = _("Failed to find LUN ID for volume %s") + raise exception.Error(msg % name) + return {'provider_location': lun_id} + + def ensure_export(self, context, volume): + """ + Driver entry point to get the iSCSI details about an existing volume + """ + return self._get_export(volume) + + def create_export(self, context, volume): + """ + Driver entry point to get the iSCSI details about a new volume + """ + return self._get_export(volume) + + def remove_export(self, context, volume): + """ + Since exporting is idempotent in this driver, we have nothing + to do for unexporting. + """ + pass + + def _find_igroup_for_initiator(self, host_id, initiator_name): + """ + Look for an existing igroup (initiator group) on the storage system + containing a given iSCSI initiator and return the name of the igroup. 
+ """ + request = self.client.factory.create('Request') + request.Name = 'igroup-list-info' + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + igroups = response.Results['initiator-groups'] + if self._api_elem_is_empty(igroups): + return None + igroup_infos = igroups[0]['initiator-group-info'] + for igroup_info in igroup_infos: + if ('iscsi' != igroup_info['initiator-group-type'][0] or + 'linux' != igroup_info['initiator-group-os-type'][0]): + continue + igroup_name = igroup_info['initiator-group-name'][0] + if not igroup_name.startswith('openstack-'): + continue + initiators = igroup_info['initiators'][0]['initiator-info'] + for initiator in initiators: + if initiator_name == initiator['initiator-name'][0]: + return igroup_name + return None + + def _create_igroup(self, host_id, initiator_name): + """ + Create a new igroup (initiator group) on the storage system to hold + the given iSCSI initiator. The group will only have 1 member and will + be named "openstack-${initiator_name}". + """ + igroup_name = 'openstack-' + initiator_name + request = self.client.factory.create('Request') + request.Name = 'igroup-create' + igroup_create_xml = ( + '%s' + 'iscsi' + 'linuxlinux') + request.Args = text.Raw(igroup_create_xml % igroup_name) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + request = self.client.factory.create('Request') + request.Name = 'igroup-add' + igroup_add_xml = ( + '%s' + '%s') + request.Args = text.Raw(igroup_add_xml % (igroup_name, initiator_name)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + return igroup_name + + def _get_lun_mappping(self, host_id, lunpath, igroup_name): + """ + Check if a given LUN is already mapped to the given igroup (initiator + group). If the LUN is mapped, also return the LUN number for the + mapping. + """ + request = self.client.factory.create('Request') + request.Name = 'lun-map-list-info' + request.Args = text.Raw('%s' % (lunpath)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + igroups = response.Results['initiator-groups'] + if self._api_elem_is_empty(igroups): + return {'mapped': False} + igroup_infos = igroups[0]['initiator-group-info'] + for igroup_info in igroup_infos: + if igroup_name == igroup_info['initiator-group-name'][0]: + return {'mapped': True, 'lun_num': igroup_info['lun-id'][0]} + return {'mapped': False} + + def _map_initiator(self, host_id, lunpath, igroup_name): + """ + Map the given LUN to the given igroup (initiator group). Return the LUN + number that the LUN was mapped to (the filer will choose the lowest + available number). 
+ """ + request = self.client.factory.create('Request') + request.Name = 'lun-map' + lun_map_xml = ('%s' + '%s') + request.Args = text.Raw(lun_map_xml % (igroup_name, lunpath)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + return response.Results['lun-id-assigned'][0] + + def _unmap_initiator(self, host_id, lunpath, igroup_name): + """Unmap the given LUN from the given igroup (initiator group).""" + request = self.client.factory.create('Request') + request.Name = 'lun-unmap' + lun_unmap_xml = ('%s' + '%s') + request.Args = text.Raw(lun_unmap_xml % (igroup_name, lunpath)) + response = self.client.service.ApiProxy(Target=host_id, + Request=request) + self._check_fail(request, response) + + def _ensure_initiator_mapped(self, host_id, lunpath, initiator_name): + """ + Check if a LUN is mapped to a given initiator already and create + the mapping if it is not. A new igroup will be created if needed. + Returns the LUN number for the mapping between the LUN and initiator + in both cases. + """ + lunpath = '/vol/' + lunpath + igroup_name = self._find_igroup_for_initiator(host_id, initiator_name) + if not igroup_name: + igroup_name = self._create_igroup(host_id, initiator_name) + + mapping = self._get_lun_mappping(host_id, lunpath, igroup_name) + if mapping['mapped']: + return mapping['lun_num'] + return self._map_initiator(host_id, lunpath, igroup_name) + + def _ensure_initiator_unmapped(self, host_id, lunpath, initiator_name): + """ + Check if a LUN is mapped to a given initiator and remove the + mapping if it is. This does not destroy the igroup. + """ + lunpath = '/vol/' + lunpath + igroup_name = self._find_igroup_for_initiator(host_id, initiator_name) + if not igroup_name: + return + + mapping = self._get_lun_mappping(host_id, lunpath, igroup_name) + if mapping['mapped']: + self._unmap_initiator(host_id, lunpath, igroup_name) + + def initialize_connection(self, volume, connector): + """ + Do the LUN masking on the storage system so the initiator can access + the LUN on the target. Also return the iSCSI properties so the + initiator can find the LUN. This implementation does not call + _get_iscsi_properties() to get the properties because cannot store the + LUN number in the database. We only find out what the LUN number will + be during this method call so we construct the properties dictionary + ourselves. 
+ """ + initiator_name = connector['initiator'] + lun_id = volume['provider_location'] + if not lun_id: + msg = _("No LUN ID for volume %s") + raise exception.Error(msg % volume['name']) + lun = self._get_lun_details(lun_id) + if not lun: + msg = _('Failed to get LUN details for LUN ID %s') + raise exception.Error(msg % lun_id) + lun_num = self._ensure_initiator_mapped(lun.HostId, lun.LunPath, + initiator_name) + + host = self._get_host_details(lun.HostId) + if not host: + msg = _('Failed to get host details for host ID %s') + raise exception.Error(msg % lun.HostId) + + portal = self._get_target_portal_for_host(host.HostId, + host.HostAddress) + if not portal: + msg = _('Failed to get target portal for filer: %s') + raise exception.Error(msg % host.HostName) + + iqn = self._get_iqn_for_host(host.HostId) + if not iqn: + msg = _('Failed to get target IQN for filer: %s') + raise exception.Error(msg % host.HostName) + + properties = {} + properties['target_discovered'] = False + (address, port) = (portal['address'], portal['port']) + properties['target_portal'] = '%s:%s' % (address, port) + properties['target_iqn'] = iqn + properties['target_lun'] = lun_num + properties['volume_id'] = volume['id'] + + auth = volume['provider_auth'] + if auth: + (auth_method, auth_username, auth_secret) = auth.split() + + properties['auth_method'] = auth_method + properties['auth_username'] = auth_username + properties['auth_password'] = auth_secret + + return { + 'driver_volume_type': 'iscsi', + 'data': properties, + } + + def terminate_connection(self, volume, connector): + """ + Unmask the LUN on the storage system so the given intiator can no + longer access it. + """ + initiator_name = connector['initiator'] + lun_id = volume['provider_location'] + if not lun_id: + msg = _('No LUN ID for volume %s') + raise exception.Error(msg % (volume['name'])) + lun = self._get_lun_details(lun_id) + if not lun: + msg = _('Failed to get LUN details for LUN ID %s') + raise exception.Error(msg % (lun_id)) + self._ensure_initiator_unmapped(lun.HostId, lun.LunPath, + initiator_name) + + def create_volume_from_snapshot(self, volume, snapshot): + raise NotImplementedError() + + def create_snapshot(self, snapshot): + raise NotImplementedError() + + def delete_snapshot(self, snapshot): + raise NotImplementedError() + + def check_for_export(self, context, volume_id): + raise NotImplementedError() diff --git a/cinder/volume/nexenta/__init__.py b/cinder/volume/nexenta/__init__.py new file mode 100644 index 00000000000..3050df8f666 --- /dev/null +++ b/cinder/volume/nexenta/__init__.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +:mod:`nexenta` -- Package contains Nexenta-specific modules +===================================================================== + +.. automodule:: nexenta +.. 
moduleauthor:: Yuriy Taraday +""" + + +class NexentaException(Exception): + MESSAGE = _('Nexenta SA returned the error') + + def __init__(self, error=None): + super(NexentaException, self).__init__(self.message, error) + + def __str__(self): + return '%s: %s' % self.args diff --git a/cinder/volume/nexenta/jsonrpc.py b/cinder/volume/nexenta/jsonrpc.py new file mode 100644 index 00000000000..7a696f8405d --- /dev/null +++ b/cinder/volume/nexenta/jsonrpc.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Nexenta Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +:mod:`nexenta.jsonrpc` -- Nexenta-specific JSON RPC client +===================================================================== + +.. automodule:: nexenta.jsonrpc +.. moduleauthor:: Yuriy Taraday +""" + +import json +import urllib2 + +from cinder.volume import nexenta +from cinder import log as logging + +LOG = logging.getLogger("cinder.volume.nexenta.jsonrpc") + + +class NexentaJSONException(nexenta.NexentaException): + pass + + +class NexentaJSONProxy(object): + def __init__(self, url, user, password, auto=False, obj=None, method=None): + self.url = url + self.user = user + self.password = password + self.auto = auto + self.obj = obj + self.method = method + + def __getattr__(self, name): + if not self.obj: + obj, method = name, None + elif not self.method: + obj, method = self.obj, name + else: + obj, method = '%s.%s' % (self.obj, self.method), name + return NexentaJSONProxy(self.url, self.user, self.password, self.auto, + obj, method) + + def __call__(self, *args): + data = json.dumps({'object': self.obj, + 'method': self.method, + 'params': args}) + auth = ('%s:%s' % (self.user, self.password)).encode('base64')[:-1] + headers = {'Content-Type': 'application/json', + 'Authorization': 'Basic %s' % (auth,)} + LOG.debug(_('Sending JSON data: %s'), data) + request = urllib2.Request(self.url, data, headers) + response_obj = urllib2.urlopen(request) + if response_obj.info().status == 'EOF in headers': + if self.auto and self.url.startswith('http://'): + LOG.info(_('Auto switching to HTTPS connection to %s'), + self.url) + self.url = 'https' + self.url[4:] + request = urllib2.Request(self.url, data, headers) + response_obj = urllib2.urlopen(request) + else: + LOG.error(_('No headers in server response')) + raise NexentaJSONException(_('Bad response from server')) + + response_data = response_obj.read() + LOG.debug(_('Got response: %s'), response_data) + response = json.loads(response_data) + if response.get('error') is not None: + raise NexentaJSONException(response['error'].get('message', '')) + else: + return response.get('result') diff --git a/cinder/volume/nexenta/volume.py b/cinder/volume/nexenta/volume.py new file mode 100644 index 00000000000..ad3b4e3c15a --- /dev/null +++ b/cinder/volume/nexenta/volume.py @@ -0,0 +1,282 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 Nexenta Systems, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance +===================================================================== + +.. automodule:: nexenta.volume +.. moduleauthor:: Yuriy Taraday +""" + +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder.volume import driver +from cinder.volume import nexenta +from cinder.volume.nexenta import jsonrpc + +LOG = logging.getLogger("cinder.volume.nexenta.volume") +FLAGS = flags.FLAGS + +nexenta_opts = [ + cfg.StrOpt('nexenta_host', + default='', + help='IP address of Nexenta SA'), + cfg.IntOpt('nexenta_rest_port', + default=2000, + help='HTTP port to connect to Nexenta REST API server'), + cfg.StrOpt('nexenta_rest_protocol', + default='auto', + help='Use http or https for REST connection (default auto)'), + cfg.StrOpt('nexenta_user', + default='admin', + help='User name to connect to Nexenta SA'), + cfg.StrOpt('nexenta_password', + default='nexenta', + help='Password to connect to Nexenta SA'), + cfg.IntOpt('nexenta_iscsi_target_portal_port', + default=3260, + help='Nexenta target portal port'), + cfg.StrOpt('nexenta_volume', + default='cinder', + help='pool on SA that will hold all volumes'), + cfg.StrOpt('nexenta_target_prefix', + default='iqn.1986-03.com.sun:02:cinder-', + help='IQN prefix for iSCSI targets'), + cfg.StrOpt('nexenta_target_group_prefix', + default='cinder/', + help='prefix for iSCSI target groups on SA'), + cfg.StrOpt('nexenta_blocksize', + default='', + help='block size for volumes (blank=default,8KB)'), + cfg.BoolOpt('nexenta_sparse', + default=False, + help='flag to create sparse volumes'), +] +FLAGS.register_opts(nexenta_opts) + + +class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921 + """Executes volume driver commands on Nexenta Appliance.""" + + def __init__(self): + super(NexentaDriver, self).__init__() + + def do_setup(self, context): + protocol = FLAGS.nexenta_rest_protocol + auto = protocol == 'auto' + if auto: + protocol = 'http' + self.nms = jsonrpc.NexentaJSONProxy( + '%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host, + FLAGS.nexenta_rest_port), + FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto) + + def check_for_setup_error(self): + """Verify that the volume for our zvols exists. 
+
+        :raise: :py:exc:`LookupError`
+        """
+        if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
+            raise LookupError(_("Volume %s does not exist in Nexenta SA") %
+                              FLAGS.nexenta_volume)
+
+    @staticmethod
+    def _get_zvol_name(volume_name):
+        """Return zvol name that corresponds given volume name."""
+        return '%s/%s' % (FLAGS.nexenta_volume, volume_name)
+
+    @staticmethod
+    def _get_target_name(volume_name):
+        """Return iSCSI target name to access volume."""
+        return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name)
+
+    @staticmethod
+    def _get_target_group_name(volume_name):
+        """Return Nexenta iSCSI target group name for volume."""
+        return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name)
+
+    def create_volume(self, volume):
+        """Create a zvol on appliance.
+
+        :param volume: volume reference
+        """
+        self.nms.zvol.create(
+            self._get_zvol_name(volume['name']),
+            '%sG' % (volume['size'],),
+            FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse)
+
+    def delete_volume(self, volume):
+        """Destroy a zvol on appliance.
+
+        :param volume: volume reference
+        """
+        try:
+            self.nms.zvol.destroy(self._get_zvol_name(volume['name']), '')
+        except nexenta.NexentaException as exc:
+            if "zvol has children" in exc.args[1]:
+                raise exception.VolumeIsBusy
+            else:
+                raise
+
+    def create_snapshot(self, snapshot):
+        """Create snapshot of existing zvol on appliance.
+
+        :param snapshot: snapshot reference
+        """
+        self.nms.zvol.create_snapshot(
+            self._get_zvol_name(snapshot['volume_name']),
+            snapshot['name'], '')
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Create new volume from other's snapshot on appliance.
+
+        :param volume: reference of volume to be created
+        :param snapshot: reference of source snapshot
+        """
+        self.nms.zvol.clone(
+            '%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
+                       snapshot['name']),
+            self._get_zvol_name(volume['name']))
+
+    def delete_snapshot(self, snapshot):
+        """Delete volume's snapshot on appliance.
+
+        :param snapshot: snapshot reference
+        """
+        try:
+            self.nms.snapshot.destroy(
+                '%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
+                           snapshot['name']),
+                '')
+        except nexenta.NexentaException as exc:
+            if "snapshot has dependent clones" in exc.args[1]:
+                raise exception.SnapshotIsBusy
+            else:
+                raise
+
+    def local_path(self, volume):
+        """Return local path to existing local volume.
+
+        We never have local volumes, so it raises NotImplementedError.
+
+        :raise: :py:exc:`NotImplementedError`
+        """
+        LOG.error(_("Call to local_path should not happen."
+                    " Verify that use_local_volumes flag is turned off."))
+        raise NotImplementedError
+
+    def _do_export(self, _ctx, volume, ensure=False):
+        """Do all steps to get zvol exported as LUN 0 at separate target.
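+
+        In order: create the iSCSI target, create the target group, add the
+        target to the group, create a logical unit for the zvol, and map it
+        as LUN 0. When `ensure` is True, 'already exists'-style errors from
+        any step are logged and ignored rather than raised.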
+ + :param volume: reference of volume to be exported + :param ensure: if True, ignore errors caused by already existing + resources + :return: iscsiadm-formatted provider location string + """ + zvol_name = self._get_zvol_name(volume['name']) + target_name = self._get_target_name(volume['name']) + target_group_name = self._get_target_group_name(volume['name']) + + try: + self.nms.iscsitarget.create_target({'target_name': target_name}) + except nexenta.NexentaException as exc: + if not ensure or 'already configured' not in exc.args[1]: + raise + else: + LOG.info(_('Ignored target creation error "%s"' + ' while ensuring export'), exc) + try: + self.nms.stmf.create_targetgroup(target_group_name) + except nexenta.NexentaException as exc: + if not ensure or 'already exists' not in exc.args[1]: + raise + else: + LOG.info(_('Ignored target group creation error "%s"' + ' while ensuring export'), exc) + try: + self.nms.stmf.add_targetgroup_member(target_group_name, + target_name) + except nexenta.NexentaException as exc: + if not ensure or 'already exists' not in exc.args[1]: + raise + else: + LOG.info(_('Ignored target group member addition error "%s"' + ' while ensuring export'), exc) + try: + self.nms.scsidisk.create_lu(zvol_name, {}) + except nexenta.NexentaException as exc: + if not ensure or 'in use' not in exc.args[1]: + raise + else: + LOG.info(_('Ignored LU creation error "%s"' + ' while ensuring export'), exc) + try: + self.nms.scsidisk.add_lun_mapping_entry(zvol_name, { + 'target_group': target_group_name, + 'lun': '0'}) + except nexenta.NexentaException as exc: + if not ensure or 'view entry exists' not in exc.args[1]: + raise + else: + LOG.info(_('Ignored LUN mapping entry addition error "%s"' + ' while ensuring export'), exc) + return '%s:%s,1 %s' % (FLAGS.nexenta_host, + FLAGS.nexenta_iscsi_target_portal_port, + target_name) + + def create_export(self, _ctx, volume): + """Create new export for zvol. + + :param volume: reference of volume to be exported + :return: iscsiadm-formatted provider location string + """ + loc = self._do_export(_ctx, volume, ensure=False) + return {'provider_location': loc} + + def ensure_export(self, _ctx, volume): + """Recreate parts of export if necessary. + + :param volume: reference of volume to be exported + """ + self._do_export(_ctx, volume, ensure=True) + + def remove_export(self, _ctx, volume): + """Destroy all resources created to export zvol. 
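+
+        The LU is deleted first; the target group and the target are then
+        torn down on a best-effort basis, logging a warning and continuing
+        if the appliance reports them already gone.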
+ + :param volume: reference of volume to be unexported + """ + zvol_name = self._get_zvol_name(volume['name']) + target_name = self._get_target_name(volume['name']) + target_group_name = self._get_target_group_name(volume['name']) + self.nms.scsidisk.delete_lu(zvol_name) + + try: + self.nms.stmf.destroy_targetgroup(target_group_name) + except nexenta.NexentaException as exc: + # We assume that target group is already gone + LOG.warn(_('Got error trying to destroy target group' + ' %(target_group)s, assuming it is already gone: %(exc)s'), + {'target_group': target_group_name, 'exc': exc}) + try: + self.nms.iscsitarget.delete_target(target_name) + except nexenta.NexentaException as exc: + # We assume that target is gone as well + LOG.warn(_('Got error trying to delete target %(target)s,' + ' assuming it is already gone: %(exc)s'), + {'target': target_name, 'exc': exc}) diff --git a/cinder/volume/san.py b/cinder/volume/san.py new file mode 100644 index 00000000000..4edc81addbf --- /dev/null +++ b/cinder/volume/san.py @@ -0,0 +1,897 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Drivers for san-stored volumes. + +The unique thing about a SAN is that we don't expect that we can run the volume +controller on the SAN hardware. We expect to access it over SSH or some API. 
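+
+Illustrative configuration snippet (addresses, credentials and the key path
+are placeholders)::
+
+    san_ip = 192.168.0.10
+    san_login = admin
+    san_ssh_port = 22
+    san_private_key = /etc/cinder/san.key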
+""" + +import base64 +import httplib +import json +import os +import paramiko +import random +import socket +import string +import uuid + +from lxml import etree + +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder.openstack.common import cfg +from cinder import utils +import cinder.volume.driver + + +LOG = logging.getLogger(__name__) + +san_opts = [ + cfg.BoolOpt('san_thin_provision', + default='true', + help='Use thin provisioning for SAN volumes?'), + cfg.StrOpt('san_ip', + default='', + help='IP address of SAN controller'), + cfg.StrOpt('san_login', + default='admin', + help='Username for SAN controller'), + cfg.StrOpt('san_password', + default='', + help='Password for SAN controller'), + cfg.StrOpt('san_private_key', + default='', + help='Filename of private key to use for SSH authentication'), + cfg.StrOpt('san_clustername', + default='', + help='Cluster name to use for creating volumes'), + cfg.IntOpt('san_ssh_port', + default=22, + help='SSH port to use with SAN'), + cfg.BoolOpt('san_is_local', + default='false', + help='Execute commands locally instead of over SSH; ' + 'use if the volume service is running on the SAN device'), + cfg.StrOpt('san_zfs_volume_base', + default='rpool/', + help='The ZFS path under which to create zvols for volumes.'), + ] + +FLAGS = flags.FLAGS +FLAGS.register_opts(san_opts) + + +class SanISCSIDriver(cinder.volume.driver.ISCSIDriver): + """Base class for SAN-style storage volumes + + A SAN-style storage value is 'different' because the volume controller + probably won't run on it, so we need to access is over SSH or another + remote protocol. + """ + + def __init__(self): + super(SanISCSIDriver, self).__init__() + self.run_local = FLAGS.san_is_local + + def _build_iscsi_target_name(self, volume): + return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name']) + + def _connect_to_ssh(self): + ssh = paramiko.SSHClient() + #TODO(justinsb): We need a better SSH key policy + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + if FLAGS.san_password: + ssh.connect(FLAGS.san_ip, + port=FLAGS.san_ssh_port, + username=FLAGS.san_login, + password=FLAGS.san_password) + elif FLAGS.san_private_key: + privatekeyfile = os.path.expanduser(FLAGS.san_private_key) + # It sucks that paramiko doesn't support DSA keys + privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile) + ssh.connect(FLAGS.san_ip, + port=FLAGS.san_ssh_port, + username=FLAGS.san_login, + pkey=privatekey) + else: + raise exception.Error(_("Specify san_password or san_private_key")) + return ssh + + def _execute(self, *cmd, **kwargs): + if self.run_local: + return utils.execute(*cmd, **kwargs) + else: + check_exit_code = kwargs.pop('check_exit_code', None) + command = ' '.join(*cmd) + return self._run_ssh(command, check_exit_code) + + def _run_ssh(self, command, check_exit_code=True): + #TODO(justinsb): SSH connection caching (?) 
+ ssh = self._connect_to_ssh() + + #TODO(justinsb): Reintroduce the retry hack + ret = utils.ssh_execute(ssh, command, check_exit_code=check_exit_code) + + ssh.close() + + return ret + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume.""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + pass + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met.""" + if not self.run_local: + if not (FLAGS.san_password or FLAGS.san_private_key): + raise exception.Error(_('Specify san_password or ' + 'san_private_key')) + + # The san_ip must always be set, because we use it for the target + if not (FLAGS.san_ip): + raise exception.Error(_("san_ip must be set")) + + +def _collect_lines(data): + """Split lines from data into an array, trimming them """ + matches = [] + for line in data.splitlines(): + match = line.strip() + matches.append(match) + + return matches + + +def _get_prefixed_values(data, prefix): + """Collect lines which start with prefix; with trimming""" + matches = [] + for line in data.splitlines(): + line = line.strip() + if line.startswith(prefix): + match = line[len(prefix):] + match = match.strip() + matches.append(match) + + return matches + + +class SolarisISCSIDriver(SanISCSIDriver): + """Executes commands relating to Solaris-hosted ISCSI volumes. + + Basic setup for a Solaris iSCSI server: + + pkg install storage-server SUNWiscsit + + svcadm enable stmf + + svcadm enable -r svc:/network/iscsi/target:default + + pfexec itadm create-tpg e1000g0 ${MYIP} + + pfexec itadm create-target -t e1000g0 + + + Then grant the user that will be logging on lots of permissions. 
+    I'm not sure exactly which though:
+
+    zfs allow justinsb create,mount,destroy rpool
+
+    usermod -P'File System Management' justinsb
+
+    usermod -P'Primary Administrator' justinsb
+
+    Also make sure you can log in using san_login and
+    san_password/san_private_key.
+    """
+
+    def _execute(self, *cmd, **kwargs):
+        new_cmd = ['pfexec']
+        new_cmd.extend(cmd)
+        return super(SolarisISCSIDriver, self)._execute(*new_cmd,
+                                                        **kwargs)
+
+    def _view_exists(self, luid):
+        (out, _err) = self._execute('/usr/sbin/stmfadm',
+                                    'list-view', '-l', luid,
+                                    check_exit_code=False)
+        if "no views found" in out:
+            return False
+
+        if "View Entry:" in out:
+            return True
+
+        raise exception.Error("Cannot parse list-view output: %s" % (out))
+
+    def _get_target_groups(self):
+        """Gets list of target groups from host."""
+        (out, _err) = self._execute('/usr/sbin/stmfadm', 'list-tg')
+        matches = _get_prefixed_values(out, 'Target group: ')
+        LOG.debug("target_groups=%s" % matches)
+        return matches
+
+    def _target_group_exists(self, target_group_name):
+        return target_group_name in self._get_target_groups()
+
+    def _get_target_group_members(self, target_group_name):
+        (out, _err) = self._execute('/usr/sbin/stmfadm',
+                                    'list-tg', '-v', target_group_name)
+        matches = _get_prefixed_values(out, 'Member: ')
+        LOG.debug("members of %s=%s" % (target_group_name, matches))
+        return matches
+
+    def _is_target_group_member(self, target_group_name, iscsi_target_name):
+        return iscsi_target_name in (
+            self._get_target_group_members(target_group_name))
+
+    def _get_iscsi_targets(self):
+        (out, _err) = self._execute('/usr/sbin/itadm', 'list-target')
+        matches = _collect_lines(out)
+
+        # Skip header
+        if len(matches) != 0:
+            assert 'TARGET NAME' in matches[0]
+            matches = matches[1:]
+
+        targets = []
+        for line in matches:
+            items = line.split()
+            assert len(items) == 3
+            targets.append(items[0])
+
+        LOG.debug("_get_iscsi_targets=%s" % (targets))
+        return targets
+
+    def _iscsi_target_exists(self, iscsi_target_name):
+        return iscsi_target_name in self._get_iscsi_targets()
+
+    def _build_zfs_poolname(self, volume):
+        zfs_poolname = '%s%s' % (FLAGS.san_zfs_volume_base, volume['name'])
+        return zfs_poolname
+
+    def create_volume(self, volume):
+        """Creates a volume."""
+        if int(volume['size']) == 0:
+            sizestr = '100M'
+        else:
+            sizestr = '%sG' % volume['size']
+
+        zfs_poolname = self._build_zfs_poolname(volume)
+
+        # Create a zfs volume
+        cmd = ['/usr/sbin/zfs', 'create']
+        if FLAGS.san_thin_provision:
+            cmd.append('-s')
+        cmd.extend(['-V', sizestr])
+        cmd.append(zfs_poolname)
+        self._execute(*cmd)
+
+    def _get_luid(self, volume):
+        zfs_poolname = self._build_zfs_poolname(volume)
+        zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname
+
+        (out, _err) = self._execute('/usr/sbin/sbdadm', 'list-lu')
+
+        lines = _collect_lines(out)
+
+        # Strip headers
+        if len(lines) >= 1:
+            if lines[0] == '':
+                lines = lines[1:]
+
+        if len(lines) >= 4:
+            assert 'Found' in lines[0]
+            assert '' == lines[1]
+            assert 'GUID' in lines[2]
+            assert '------------------' in lines[3]
+
+            lines = lines[4:]
+
+        for line in lines:
+            items = line.split()
+            assert len(items) == 3
+            if items[2] == zvol_name:
+                luid = items[0].strip()
+                return luid
+
+        raise exception.Error(_('LUID not found for %(zfs_poolname)s. '
+                                'Output=%(out)s') % locals())
+
+    def _is_lu_created(self, volume):
+        luid = self._get_luid(volume)
+        return luid
+
+    def delete_volume(self, volume):
+        """Deletes a volume."""
+        zfs_poolname = self._build_zfs_poolname(volume)
+        self._execute('/usr/sbin/zfs', 'destroy', zfs_poolname)
+
+    def local_path(self, volume):
+        # TODO(justinsb): Is this needed here?
+        escaped_group = FLAGS.volume_group.replace('-', '--')
+        escaped_name = volume['name'].replace('-', '--')
+        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        #TODO(justinsb): On bootup, this is called for every volume.
+        # It then runs ~5 SSH commands for each volume,
+        # most of which fetch the same info each time.
+        # This makes initial start stupid-slow.
+        return self._do_export(volume, force_create=False)
+
+    def create_export(self, context, volume):
+        return self._do_export(volume, force_create=True)
+
+    def _do_export(self, volume, force_create):
+        # Create a Logical Unit (LU) backed by the zfs volume
+        zfs_poolname = self._build_zfs_poolname(volume)
+
+        if force_create or not self._is_lu_created(volume):
+            zvol_name = '/dev/zvol/rdsk/%s' % zfs_poolname
+            self._execute('/usr/sbin/sbdadm', 'create-lu', zvol_name)
+
+        luid = self._get_luid(volume)
+        iscsi_name = self._build_iscsi_target_name(volume)
+        target_group_name = 'tg-%s' % volume['name']
+
+        # Create an iSCSI target, mapped to just this volume
+        if force_create or not self._target_group_exists(target_group_name):
+            self._execute('/usr/sbin/stmfadm', 'create-tg', target_group_name)
+
+        # Yes, we add the target to its group before we create the target!
+        # Otherwise, it complains that the target is already active
+        if force_create or not self._is_target_group_member(target_group_name,
+                                                            iscsi_name):
+            self._execute('/usr/sbin/stmfadm',
+                          'add-tg-member', '-g', target_group_name, iscsi_name)
+
+        if force_create or not self._iscsi_target_exists(iscsi_name):
+            self._execute('/usr/sbin/itadm', 'create-target', '-n', iscsi_name)
+
+        if force_create or not self._view_exists(luid):
+            self._execute('/usr/sbin/stmfadm',
+                          'add-view', '-t', target_group_name, luid)
+
+        #TODO(justinsb): Is this always 1? Does it matter?
+        iscsi_portal_interface = '1'
+        iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface
+
+        db_update = {}
+        db_update['provider_location'] = ("%s %s" %
+                                          (iscsi_portal,
+                                           iscsi_name))
+
+        return db_update
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+
+        # This is the reverse of _do_export
+        luid = self._get_luid(volume)
+        iscsi_name = self._build_iscsi_target_name(volume)
+        target_group_name = 'tg-%s' % volume['name']
+
+        if self._view_exists(luid):
+            self._execute('/usr/sbin/stmfadm', 'remove-view', '-l', luid, '-a')
+
+        if self._iscsi_target_exists(iscsi_name):
+            self._execute('/usr/sbin/stmfadm', 'offline-target', iscsi_name)
+            self._execute('/usr/sbin/itadm', 'delete-target', iscsi_name)
+
+        # We don't delete the tg-member; we delete the whole tg!
+
+        if self._target_group_exists(target_group_name):
+            self._execute('/usr/sbin/stmfadm', 'delete-tg', target_group_name)
+
+        if self._is_lu_created(volume):
+            self._execute('/usr/sbin/sbdadm', 'delete-lu', luid)
+
+
+class HpSanISCSIDriver(SanISCSIDriver):
+    """Executes commands relating to HP/Lefthand SAN ISCSI volumes.
+
+    We use the CLIQ interface, over SSH.
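+
+    A CLIQ invocation is simply a verb followed by space-separated key=value
+    arguments (see _cliq_run below); an illustrative call, with made-up
+    values, would be:
+
+        createVolume volumeName=volume-00000001 clusterName=cl1 size=1GB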
+
+    Rough overview of CLIQ commands used:
+
+    :createVolume: (creates the volume)
+
+    :getVolumeInfo: (to discover the IQN etc)
+
+    :getClusterInfo: (to discover the iSCSI target IP address)
+
+    :assignVolumeChap: (exports it with CHAP security)
+
+    The 'trick' here is that the HP SAN enforces security by default, so
+    normally a volume mount would need both to configure the SAN in the
+    volume layer and to do the mount in the compute layer.  Multi-layer
+    operations are not catered for at the moment in the cinder architecture,
+    so instead we share the volume using CHAP at volume creation time.  Then
+    the mount need only use those CHAP credentials, so it can take place
+    exclusively in the compute layer.
+    """
+
+    def _cliq_run(self, verb, cliq_args):
+        """Runs a CLIQ command over SSH, without doing any result parsing."""
+        cliq_arg_strings = []
+        for k, v in cliq_args.items():
+            cliq_arg_strings.append(" %s=%s" % (k, v))
+        cmd = verb + ''.join(cliq_arg_strings)
+
+        return self._run_ssh(cmd)
+
+    def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True):
+        """Runs a CLIQ command over SSH, parsing and checking the output."""
+        cliq_args['output'] = 'XML'
+        (out, _err) = self._cliq_run(verb, cliq_args)
+
+        LOG.debug(_("CLIQ command returned %s"), out)
+
+        result_xml = etree.fromstring(out)
+        if check_cliq_result:
+            response_node = result_xml.find("response")
+            if response_node is None:
+                msg = (_("Malformed response to CLIQ command "
+                         "%(verb)s %(cliq_args)s. Result=%(out)s") %
+                       locals())
+                raise exception.Error(msg)
+
+            result_code = response_node.attrib.get("result")
+
+            if result_code != "0":
+                msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. "
+                         "Result=%(out)s") %
+                       locals())
+                raise exception.Error(msg)
+
+        return result_xml
+
+    def _cliq_get_cluster_info(self, cluster_name):
+        """Queries for info about the cluster (including IP)."""
+        cliq_args = {}
+        cliq_args['clusterName'] = cluster_name
+        cliq_args['searchDepth'] = '1'
+        cliq_args['verbose'] = '0'
+
+        result_xml = self._cliq_run_xml("getClusterInfo", cliq_args)
+
+        return result_xml
+
+    def _cliq_get_cluster_vip(self, cluster_name):
+        """Gets the IP on which a cluster shares iSCSI volumes."""
+        cluster_xml = self._cliq_get_cluster_info(cluster_name)
+
+        vips = []
+        for vip in cluster_xml.findall("response/cluster/vip"):
+            vips.append(vip.attrib.get('ipAddress'))
+
+        if len(vips) == 1:
+            return vips[0]
+
+        _xml = etree.tostring(cluster_xml)
+        msg = (_("Unexpected number of virtual ips for cluster "
+                 "%(cluster_name)s. Result=%(_xml)s") %
+               locals())
+        raise exception.Error(msg)
+
+    def _cliq_get_volume_info(self, volume_name):
+        """Gets the volume info, including IQN."""
+        cliq_args = {}
+        cliq_args['volumeName'] = volume_name
+        result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args)
+
+        # The result is an XML document of roughly this shape:
+        #
+        #   <response result="0" ...>
+        #     <volume iscsiIqn="..." clusterName="..." ...>
+        #       <status ... />
+        #       <permission ... />
+        #     </volume>
+        #   </response>
+
+        # Flatten the nodes into a dictionary; use prefixes to avoid collisions
+        volume_attributes = {}
+
+        volume_node = result_xml.find("response/volume")
+        for k, v in volume_node.attrib.items():
+            volume_attributes["volume." + k] = v
+
+        status_node = volume_node.find("status")
+        if status_node is not None:
+            for k, v in status_node.attrib.items():
+                volume_attributes["status." + k] = v
+
+        # We only consider the first permission node
+        permission_node = volume_node.find("permission")
+        if permission_node is not None:
+            for k, v in permission_node.attrib.items():
+                volume_attributes["permission." + k] = v
+
+        LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") %
+                  locals())
+        return volume_attributes
+
+    def create_volume(self, volume):
+        """Creates a volume."""
+        cliq_args = {}
+        cliq_args['clusterName'] = FLAGS.san_clustername
+        #TODO(justinsb): Should we default to inheriting thinProvision?
+        cliq_args['thinProvision'] = '1' if FLAGS.san_thin_provision else '0'
+        cliq_args['volumeName'] = volume['name']
+        if int(volume['size']) == 0:
+            cliq_args['size'] = '100MB'
+        else:
+            cliq_args['size'] = '%sGB' % volume['size']
+
+        self._cliq_run_xml("createVolume", cliq_args)
+
+        volume_info = self._cliq_get_volume_info(volume['name'])
+        cluster_name = volume_info['volume.clusterName']
+        iscsi_iqn = volume_info['volume.iscsiIqn']
+
+        #TODO(justinsb): Is this always 1? Does it matter?
+        cluster_interface = '1'
+
+        cluster_vip = self._cliq_get_cluster_vip(cluster_name)
+        iscsi_portal = cluster_vip + ":3260," + cluster_interface
+
+        model_update = {}
+        model_update['provider_location'] = ("%s %s" %
+                                             (iscsi_portal,
+                                              iscsi_iqn))
+
+        return model_update
+
+    def delete_volume(self, volume):
+        """Deletes a volume."""
+        cliq_args = {}
+        cliq_args['volumeName'] = volume['name']
+        cliq_args['prompt'] = 'false'  # Don't confirm
+
+        self._cliq_run_xml("deleteVolume", cliq_args)
+
+    def local_path(self, volume):
+        # TODO(justinsb): Is this needed here?
+        raise exception.Error(_("local_path not supported"))
+
+    def ensure_export(self, context, volume):
+        """Synchronously recreates an export for a logical volume."""
+        return self._do_export(context, volume, force_create=False)
+
+    def create_export(self, context, volume):
+        return self._do_export(context, volume, force_create=True)
+
+    def _do_export(self, context, volume, force_create):
+        """Supports ensure_export and create_export."""
+        volume_info = self._cliq_get_volume_info(volume['name'])
+
+        is_shared = 'permission.authGroup' in volume_info
+
+        model_update = {}
+
+        should_export = False
+
+        if force_create or not is_shared:
+            should_export = True
+            # Check that we have a project_id
+            project_id = volume['project_id']
+            if not project_id:
+                project_id = context.project_id
+
+            if project_id:
+                #TODO(justinsb): Use a real per-project password here
+                chap_username = 'proj_' + project_id
+                # HP/Lefthand requires that the password be >= 12 characters
+                chap_password = 'project_secret_' + project_id
+            else:
+                msg = (_("Could not determine project for volume %s, "
+                         "can't export") %
+                       (volume['name']))
+                if force_create:
+                    raise exception.Error(msg)
+                else:
+                    LOG.warn(msg)
+                    should_export = False
+
+        if should_export:
+            cliq_args = {}
+            cliq_args['volumeName'] = volume['name']
+            cliq_args['chapName'] = chap_username
+            cliq_args['targetSecret'] = chap_password
+
+            self._cliq_run_xml("assignVolumeChap", cliq_args)
+
+            model_update['provider_auth'] = ("CHAP %s %s" %
+                                             (chap_username, chap_password))
+
+        return model_update
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+        cliq_args = {}
+        cliq_args['volumeName'] = volume['name']
+
+        self._cliq_run_xml("unassignVolume", cliq_args)
+
+
+class SolidFireSanISCSIDriver(SanISCSIDriver):
+
+    def _issue_api_request(self, method_name, params):
+        """All API requests to the SolidFire device go through this method.
+
+        Simple JSON-RPC web-based API calls.
+        Each call takes a set of parameters (dict)
+        and returns results in a dict as well.
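+
+        A request body therefore looks roughly like this (illustrative
+        values):
+
+            {"method": "CreateVolume", "id": 42, "params": {...}}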
+ """ + + host = FLAGS.san_ip + # For now 443 is the only port our server accepts requests on + port = 443 + + # NOTE(john-griffith): Probably don't need this, but the idea is + # we provide a request_id so we can correlate + # responses with requests + request_id = int(uuid.uuid4()) # just generate a random number + + cluster_admin = FLAGS.san_login + cluster_password = FLAGS.san_password + + command = {'method': method_name, + 'id': request_id} + + if params is not None: + command['params'] = params + + payload = json.dumps(command, ensure_ascii=False) + payload.encode('utf-8') + # we use json-rpc, webserver needs to see json-rpc in header + header = {'Content-Type': 'application/json-rpc; charset=utf-8'} + + if cluster_password is not None: + # base64.encodestring includes a newline character + # in the result, make sure we strip it off + auth_key = base64.encodestring('%s:%s' % (cluster_admin, + cluster_password))[:-1] + header['Authorization'] = 'Basic %s' % auth_key + + LOG.debug(_("Payload for SolidFire API call: %s"), payload) + connection = httplib.HTTPSConnection(host, port) + connection.request('POST', '/json-rpc/1.0', payload, header) + response = connection.getresponse() + data = {} + + if response.status != 200: + connection.close() + raise exception.SolidFireAPIException(status=response.status) + + else: + data = response.read() + try: + data = json.loads(data) + + except (TypeError, ValueError), exc: + connection.close() + msg = _("Call to json.loads() raised an exception: %s") % exc + raise exception.SfJsonEncodeFailure(msg) + + connection.close() + + LOG.debug(_("Results of SolidFire API call: %s"), data) + return data + + def _get_volumes_by_sfaccount(self, account_id): + params = {'accountID': account_id} + data = self._issue_api_request('ListVolumesForAccount', params) + if 'result' in data: + return data['result']['volumes'] + + def _get_sfaccount_by_name(self, sf_account_name): + sfaccount = None + params = {'username': sf_account_name} + data = self._issue_api_request('GetAccountByName', params) + if 'result' in data and 'account' in data['result']: + LOG.debug(_('Found solidfire account: %s'), sf_account_name) + sfaccount = data['result']['account'] + return sfaccount + + def _create_sfaccount(self, cinder_project_id): + """Create account on SolidFire device if it doesn't already exist. + + We're first going to check if the account already exits, if it does + just return it. If not, then create it. 
+ """ + + sf_account_name = socket.gethostname() + '-' + cinder_project_id + sfaccount = self._get_sfaccount_by_name(sf_account_name) + if sfaccount is None: + LOG.debug(_('solidfire account: %s does not exist, create it...'), + sf_account_name) + chap_secret = self._generate_random_string(12) + params = {'username': sf_account_name, + 'initiatorSecret': chap_secret, + 'targetSecret': chap_secret, + 'attributes': {}} + data = self._issue_api_request('AddAccount', params) + if 'result' in data: + sfaccount = self._get_sfaccount_by_name(sf_account_name) + + return sfaccount + + def _get_cluster_info(self): + params = {} + data = self._issue_api_request('GetClusterInfo', params) + if 'result' not in data: + raise exception.SolidFireAPIDataException(data=data) + + return data['result'] + + def _do_export(self, volume): + """Gets the associated account, retrieves CHAP info and updates.""" + + sfaccount_name = '%s-%s' % (socket.gethostname(), volume['project_id']) + sfaccount = self._get_sfaccount_by_name(sfaccount_name) + + model_update = {} + model_update['provider_auth'] = ('CHAP %s %s' + % (sfaccount['username'], sfaccount['targetSecret'])) + + return model_update + + def _generate_random_string(self, length): + """Generates random_string to use for CHAP password.""" + + char_set = string.ascii_uppercase + string.digits + return ''.join(random.sample(char_set, length)) + + def create_volume(self, volume): + """Create volume on SolidFire device. + + The account is where CHAP settings are derived from, volume is + created and exported. Note that the new volume is immediately ready + for use. + + One caveat here is that an existing user account must be specified + in the API call to create a new volume. We use a set algorithm to + determine account info based on passed in cinder volume object. First + we check to see if the account already exists (and use it), or if it + does not already exist, we'll go ahead and create it. + + For now, we're just using very basic settings, QOS is + turned off, 512 byte emulation is off etc. Will be + looking at extensions for these things later, or + this module can be hacked to suit needs. 
+ """ + + LOG.debug(_("Enter SolidFire create_volume...")) + GB = 1048576 * 1024 + slice_count = 1 + enable_emulation = False + attributes = {} + + cluster_info = self._get_cluster_info() + iscsi_portal = cluster_info['clusterInfo']['svip'] + ':3260' + sfaccount = self._create_sfaccount(volume['project_id']) + account_id = sfaccount['accountID'] + account_name = sfaccount['username'] + chap_secret = sfaccount['targetSecret'] + + params = {'name': volume['name'], + 'accountID': account_id, + 'sliceCount': slice_count, + 'totalSize': volume['size'] * GB, + 'enable512e': enable_emulation, + 'attributes': attributes} + + data = self._issue_api_request('CreateVolume', params) + + if 'result' not in data or 'volumeID' not in data['result']: + raise exception.SolidFireAPIDataException(data=data) + + volume_id = data['result']['volumeID'] + + volume_list = self._get_volumes_by_sfaccount(account_id) + iqn = None + for v in volume_list: + if v['volumeID'] == volume_id: + iqn = 'iqn.2010-01.com.solidfire:' + v['iqn'] + break + + model_update = {} + + # NOTE(john-griffith): SF volumes are always at lun 0 + model_update['provider_location'] = ('%s %s %s' + % (iscsi_portal, iqn, 0)) + model_update['provider_auth'] = ('CHAP %s %s' + % (account_name, chap_secret)) + + LOG.debug(_("Leaving SolidFire create_volume")) + return model_update + + def delete_volume(self, volume): + """Delete SolidFire Volume from device. + + SolidFire allows multipe volumes with same name, + volumeID is what's guaranteed unique. + + What we'll do here is check volumes based on account. this + should work because cinder will increment its volume_id + so we should always get the correct volume. This assumes + that cinder does not assign duplicate ID's. + """ + + LOG.debug(_("Enter SolidFire delete_volume...")) + sf_account_name = socket.gethostname() + '-' + volume['project_id'] + sfaccount = self._get_sfaccount_by_name(sf_account_name) + if sfaccount is None: + raise exception.SfAccountNotFound(account_name=sf_account_name) + + params = {'accountID': sfaccount['accountID']} + data = self._issue_api_request('ListVolumesForAccount', params) + if 'result' not in data: + raise exception.SolidFireAPIDataException(data=data) + + found_count = 0 + volid = -1 + for v in data['result']['volumes']: + if v['name'] == volume['name']: + found_count += 1 + volid = v['volumeID'] + + if found_count != 1: + LOG.debug(_("Deleting volumeID: %s"), volid) + raise exception.DuplicateSfVolumeNames(vol_name=volume['name']) + + params = {'volumeID': volid} + data = self._issue_api_request('DeleteVolume', params) + if 'result' not in data: + raise exception.SolidFireAPIDataException(data=data) + + LOG.debug(_("Leaving SolidFire delete_volume")) + + def ensure_export(self, context, volume): + LOG.debug(_("Executing SolidFire ensure_export...")) + return self._do_export(volume) + + def create_export(self, context, volume): + LOG.debug(_("Executing SolidFire create_export...")) + return self._do_export(volume) diff --git a/cinder/volume/volume_types.py b/cinder/volume/volume_types.py new file mode 100644 index 00000000000..5df9e6e60b4 --- /dev/null +++ b/cinder/volume/volume_types.py @@ -0,0 +1,125 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright (c) 2010 Citrix Systems, Inc. 
+# Copyright 2011 Ken Pepple +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Built-in volume type properties.""" + +from cinder import context +from cinder import db +from cinder import exception +from cinder import flags +from cinder import log as logging + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +def create(context, name, extra_specs={}): + """Creates volume types.""" + try: + db.volume_type_create(context, + dict(name=name, + extra_specs=extra_specs)) + except exception.DBError, e: + LOG.exception(_('DB error: %s') % e) + raise exception.VolumeTypeCreateFailed(name=name, + extra_specs=extra_specs) + + +def destroy(context, name): + """Marks volume types as deleted.""" + if name is None: + msg = _("name cannot be None") + raise exception.InvalidVolumeType(reason=msg) + else: + db.volume_type_destroy(context, name) + + +def get_all_types(context, inactive=0, search_opts={}): + """Get all non-deleted volume_types. + + Pass true as argument if you want deleted volume types returned also. + + """ + vol_types = db.volume_type_get_all(context, inactive) + + if search_opts: + LOG.debug(_("Searching by: %s") % str(search_opts)) + + def _check_extra_specs_match(vol_type, searchdict): + for k, v in searchdict.iteritems(): + if (k not in vol_type['extra_specs'].keys() + or vol_type['extra_specs'][k] != v): + return False + return True + + # search_option to filter_name mapping. + filter_mapping = {'extra_specs': _check_extra_specs_match} + + result = {} + for type_name, type_args in vol_types.iteritems(): + # go over all filters in the list + for opt, values in search_opts.iteritems(): + try: + filter_func = filter_mapping[opt] + except KeyError: + # no such filter - ignore it, go to next filter + continue + else: + if filter_func(type_args, values): + result[type_name] = type_args + break + vol_types = result + return vol_types + + +def get_volume_type(ctxt, id): + """Retrieves single volume type by id.""" + if id is None: + msg = _("id cannot be None") + raise exception.InvalidVolumeType(reason=msg) + + if ctxt is None: + ctxt = context.get_admin_context() + + return db.volume_type_get(ctxt, id) + + +def get_volume_type_by_name(context, name): + """Retrieves single volume type by name.""" + if name is None: + msg = _("name cannot be None") + raise exception.InvalidVolumeType(reason=msg) + + return db.volume_type_get_by_name(context, name) + + +def is_key_value_present(volume_type_id, key, value, volume_type=None): + if volume_type_id is None: + return False + + if volume_type is None: + volume_type = get_volume_type(context.get_admin_context(), + volume_type_id) + if (volume_type.get('extra_specs') is None or + volume_type['extra_specs'].get(key) != value): + return False + else: + return True diff --git a/cinder/volume/xensm.py b/cinder/volume/xensm.py new file mode 100644 index 00000000000..ad21afe0e09 --- /dev/null +++ b/cinder/volume/xensm.py @@ -0,0 +1,237 @@ +# Copyright (c) 2011 Citrix Systems, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import utils +from cinder.virt.xenapi import connection as xenapi_conn +from cinder.virt.xenapi import volumeops +import cinder.volume.driver + +LOG = logging.getLogger(__name__) +FLAGS = flags.FLAGS + + +class XenSMDriver(cinder.volume.driver.VolumeDriver): + + def _convert_config_params(self, conf_str): + params = dict([item.split("=") for item in conf_str.split()]) + return params + + def _get_introduce_sr_keys(self, params): + if 'name_label' in params: + del params['name_label'] + keys = params.keys() + keys.append('sr_type') + return keys + + def _create_storage_repo(self, context, backend_ref): + """Either creates or introduces SR on host + depending on whether it exists in xapi db.""" + params = self._convert_config_params(backend_ref['config_params']) + if 'name_label' in params: + label = params['name_label'] + del params['name_label'] + else: + label = 'SR-' + str(backend_ref['id']) + + params['sr_type'] = backend_ref['sr_type'] + + if backend_ref['sr_uuid'] is None: + # run the sr create command + try: + LOG.debug(_('SR name = %s') % label) + LOG.debug(_('Params: %s') % str(params)) + sr_uuid = self._volumeops.create_sr(label, params) + # update sr_uuid and created in db + except Exception as ex: + LOG.debug(_("Failed to create sr %s...continuing") % + str(backend_ref['id'])) + raise exception.Error(_('Create failed')) + + LOG.debug(_('SR UUID of new SR is: %s') % sr_uuid) + try: + self.db.sm_backend_conf_update(context, + backend_ref['id'], + dict(sr_uuid=sr_uuid)) + except Exception as ex: + LOG.exception(ex) + raise exception.Error(_("Failed to update db")) + + else: + # sr introduce, if not already done + try: + self._volumeops.introduce_sr(backend_ref['sr_uuid'], label, + params) + except Exception as ex: + LOG.exception(ex) + LOG.debug(_("Failed to introduce sr %s...continuing") + % str(backend_ref['id'])) + + def _create_storage_repos(self, context): + """Create/Introduce storage repositories at start.""" + backends = self.db.sm_backend_conf_get_all(context) + for backend in backends: + try: + self._create_storage_repo(context, backend) + except Exception as ex: + LOG.exception(ex) + raise exception.Error(_('Failed to reach backend %d') + % backend['id']) + + def __init__(self, *args, **kwargs): + """Connect to the hypervisor.""" + + # This driver leverages Xen storage manager, and hence requires + # hypervisor to be Xen + if FLAGS.connection_type != 'xenapi': + raise exception.Error(_('XenSMDriver requires xenapi connection')) + + url = FLAGS.xenapi_connection_url + username = FLAGS.xenapi_connection_username + password = FLAGS.xenapi_connection_password + try: + session = xenapi_conn.XenAPISession(url, username, password) + self._volumeops = volumeops.VolumeOps(session) + except Exception as ex: + LOG.exception(ex) + raise exception.Error(_("Failed to initiate session")) + + super(XenSMDriver, 
self).__init__(execute=utils.execute, + sync_exec=utils.execute, + *args, **kwargs) + + def do_setup(self, ctxt): + """Setup includes creating or introducing storage repos + existing in the database and destroying deleted ones.""" + + # TODO(renukaapte) purge storage repos + self.ctxt = ctxt + self._create_storage_repos(ctxt) + + def create_volume(self, volume): + """Creates a logical volume. Can optionally return a Dictionary of + changes to the volume object to be persisted.""" + + # For now the scheduling logic will be to try to fit the volume in + # the first available backend. + # TODO(renukaapte) better scheduling once APIs are in place + sm_vol_rec = None + backends = self.db.sm_backend_conf_get_all(self.ctxt) + for backend in backends: + # Ensure that storage repo exists, if not create. + # This needs to be done because if cinder compute and + # volume are both running on this host, then, as a + # part of detach_volume, compute could potentially forget SR + self._create_storage_repo(self.ctxt, backend) + sm_vol_rec = self._volumeops.create_volume_for_sm(volume, + backend['sr_uuid']) + if sm_vol_rec: + LOG.debug(_('Volume will be created in backend - %d') + % backend['id']) + break + + if sm_vol_rec: + # Update db + sm_vol_rec['id'] = volume['id'] + sm_vol_rec['backend_id'] = backend['id'] + try: + self.db.sm_volume_create(self.ctxt, sm_vol_rec) + except Exception as ex: + LOG.exception(ex) + raise exception.Error(_("Failed to update volume in db")) + + else: + raise exception.Error(_('Unable to create volume')) + + def delete_volume(self, volume): + + vol_rec = self.db.sm_volume_get(self.ctxt, volume['id']) + + try: + # If compute runs on this node, detach could have disconnected SR + backend_ref = self.db.sm_backend_conf_get(self.ctxt, + vol_rec['backend_id']) + self._create_storage_repo(self.ctxt, backend_ref) + self._volumeops.delete_volume_for_sm(vol_rec['vdi_uuid']) + except Exception as ex: + LOG.exception(ex) + raise exception.Error(_("Failed to delete vdi")) + + try: + self.db.sm_volume_delete(self.ctxt, volume['id']) + except Exception as ex: + LOG.exception(ex) + raise exception.Error(_("Failed to delete volume in db")) + + def local_path(self, volume): + return str(volume['id']) + + def undiscover_volume(self, volume): + """Undiscover volume on a remote host.""" + pass + + def discover_volume(self, context, volume): + return str(volume['id']) + + def check_for_setup_error(self): + pass + + def create_export(self, context, volume): + """Exports the volume.""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume.""" + pass + + def ensure_export(self, context, volume): + """Safely, synchronously recreates an export for a logical volume.""" + pass + + def initialize_connection(self, volume, connector): + try: + xensm_properties = dict(self.db.sm_volume_get(self.ctxt, + volume['id'])) + except Exception as ex: + LOG.exception(ex) + raise exception.Error(_("Failed to find volume in db")) + + # Keep the volume id key consistent with what ISCSI driver calls it + xensm_properties['volume_id'] = xensm_properties['id'] + del xensm_properties['id'] + + try: + backend_conf = self.db.sm_backend_conf_get(self.ctxt, + xensm_properties['backend_id']) + except Exception as ex: + LOG.exception(ex) + raise exception.Error(_("Failed to find backend in db")) + + params = self._convert_config_params(backend_conf['config_params']) + + xensm_properties['flavor_id'] = backend_conf['flavor_id'] + xensm_properties['sr_uuid'] = backend_conf['sr_uuid'] + 
xensm_properties['sr_type'] = backend_conf['sr_type'] + xensm_properties.update(params) + _introduce_sr_keys = self._get_introduce_sr_keys(params) + xensm_properties['introduce_sr_keys'] = _introduce_sr_keys + return { + 'driver_volume_type': 'xensm', + 'data': xensm_properties + } + + def terminate_connection(self, volume, connector): + pass diff --git a/cinder/wsgi.py b/cinder/wsgi.py new file mode 100644 index 00000000000..0bf63f9eeb7 --- /dev/null +++ b/cinder/wsgi.py @@ -0,0 +1,374 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Utility methods for working with WSGI servers.""" + +import sys + +import eventlet +import eventlet.wsgi +import greenlet +from paste import deploy +import routes.middleware +import webob.dec +import webob.exc + +from cinder import exception +from cinder import flags +from cinder import log as logging +from cinder import utils + + +FLAGS = flags.FLAGS +LOG = logging.getLogger(__name__) + + +class Server(object): + """Server class to manage a WSGI server, serving a WSGI application.""" + + default_pool_size = 1000 + + def __init__(self, name, app, host=None, port=None, pool_size=None, + protocol=eventlet.wsgi.HttpProtocol): + """Initialize, but do not start, a WSGI server. + + :param name: Pretty name for logging. + :param app: The WSGI application to serve. + :param host: IP address to serve the application. + :param port: Port number to server the application. + :param pool_size: Maximum number of eventlets to spawn concurrently. + :returns: None + + """ + self.name = name + self.app = app + self.host = host or "0.0.0.0" + self.port = port or 0 + self._server = None + self._socket = None + self._protocol = protocol + self._pool = eventlet.GreenPool(pool_size or self.default_pool_size) + self._logger = logging.getLogger("eventlet.wsgi.server") + self._wsgi_logger = logging.WritableLogger(self._logger) + + def _start(self): + """Run the blocking eventlet WSGI server. + + :returns: None + + """ + eventlet.wsgi.server(self._socket, + self.app, + protocol=self._protocol, + custom_pool=self._pool, + log=self._wsgi_logger) + + def start(self, backlog=128): + """Start serving a WSGI application. + + :param backlog: Maximum number of queued connections. + :returns: None + :raises: cinder.exception.InvalidInput + + """ + if backlog < 1: + raise exception.InvalidInput( + reason='The backlog must be more than 1') + self._socket = eventlet.listen((self.host, self.port), backlog=backlog) + self._server = eventlet.spawn(self._start) + (self.host, self.port) = self._socket.getsockname() + LOG.info(_("Started %(name)s on %(host)s:%(port)s") % self.__dict__) + + def stop(self): + """Stop this server. + + This is not a very nice action, as currently the method by which a + server is stopped is by killing its eventlet. 
+ + :returns: None + + """ + LOG.info(_("Stopping WSGI server.")) + self._server.kill() + + def wait(self): + """Block, until the server has stopped. + + Waits on the server's eventlet to finish, then returns. + + :returns: None + + """ + try: + self._server.wait() + except greenlet.GreenletExit: + LOG.info(_("WSGI server has stopped.")) + + +class Request(webob.Request): + pass + + +class Application(object): + """Base WSGI application wrapper. Subclasses need to implement __call__.""" + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [app:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [app:wadl] + latest_version = 1.3 + paste.app_factory = cinder.api.fancy_api:Wadl.factory + + which would result in a call to the `Wadl` class as + + import cinder.api.fancy_api + fancy_api.Wadl(latest_version='1.3') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + return cls(**local_config) + + def __call__(self, environ, start_response): + r"""Subclasses will probably want to implement __call__ like this: + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + # Any of the following objects work as responses: + + # Option 1: simple string + res = 'message\n' + + # Option 2: a nicely formatted HTTP exception page + res = exc.HTTPForbidden(detail='Nice try') + + # Option 3: a webob Response object (in case you need to play with + # headers, or you want to be treated like an iterable, or or or) + res = Response(); + res.app_iter = open('somefile') + + # Option 4: any wsgi app to be run next + res = self.application + + # Option 5: you can get a Response object for a wsgi app, too, to + # play with headers etc + res = req.get_response(self.application) + + # You can then just return your response... + return res + # ... or set req.response and return None. + req.response = res + + See the end of http://pythonpaste.org/webob/modules/dec.html + for more info. + + """ + raise NotImplementedError(_('You must implement __call__')) + + +class Middleware(Application): + """Base WSGI middleware. + + These classes require an application to be + initialized that will be called next. By default the middleware will + simply call its wrapped app, or you can override __call__ to customize its + behavior. + + """ + + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [filter:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [filter:analytics] + redis_host = 127.0.0.1 + paste.filter_factory = cinder.api.analytics:Analytics.factory + + which would result in a call to the `Analytics` class as + + import cinder.api.analytics + analytics.Analytics(app_from_paste, redis_host='127.0.0.1') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def __init__(self, application): + self.application = application + + def process_request(self, req): + """Called on each request. 
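+
+        A subclass could, for instance, return a webob.exc.HTTPForbidden
+        response here to reject a request before it ever reaches the
+        wrapped application.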
+ + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) + + +class Debug(Middleware): + """Helper class for debugging a WSGI application. + + Can be inserted into any WSGI application chain to get information + about the request and response. + + """ + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + print ('*' * 40) + ' REQUEST ENVIRON' + for key, value in req.environ.items(): + print key, '=', value + print + resp = req.get_response(self.application) + + print ('*' * 40) + ' RESPONSE HEADERS' + for (key, value) in resp.headers.iteritems(): + print key, '=', value + print + + resp.app_iter = self.print_generator(resp.app_iter) + + return resp + + @staticmethod + def print_generator(app_iter): + """Iterator that prints the contents of a wrapper string.""" + print ('*' * 40) + ' BODY' + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print + + +class Router(object): + """WSGI middleware that maps incoming requests to WSGI apps.""" + + def __init__(self, mapper): + """Create a router for the given routes.Mapper. + + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be an object that can route + the request to the action-specific method. + + Examples: + mapper = routes.Mapper() + sc = ServerController() + + # Explicit mapping of one route to a controller+action + mapper.connect(None, '/svrlist', controller=sc, action='list') + + # Actions are all implicitly defined + mapper.resource('server', 'servers', controller=sc) + + # Pointing to an arbitrary WSGI app. You can specify the + # {path_info:.*} parameter so the target app can be handed just that + # section of the URL. + mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) + + """ + self.map = mapper + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + self.map) + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + """Route the incoming request to a controller based on self.map. + + If no match, return a 404. + + """ + return self._router + + @staticmethod + @webob.dec.wsgify(RequestClass=Request) + def _dispatch(req): + """Dispatch the request to the appropriate controller. + + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. Either returns 404 + or the routed WSGI app's response. + + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + return webob.exc.HTTPNotFound() + app = match['controller'] + return app + + +class Loader(object): + """Used to load WSGI applications from paste configurations.""" + + def __init__(self, config_path=None): + """Initialize the loader, and attempt to find the config. + + :param config_path: Full or relative path to the paste config. 
+ :returns: None + + """ + config_path = config_path or FLAGS.api_paste_config + self.config_path = utils.find_config(config_path) + + def load_app(self, name): + """Return the paste URLMap wrapped WSGI application. + + :param name: Name of the application to load. + :returns: Paste URLMap object wrapping the requested application. + :raises: `cinder.exception.PasteAppNotFound` + + """ + try: + return deploy.loadapp("config:%s" % self.config_path, name=name) + except LookupError as err: + LOG.error(err) + raise exception.PasteAppNotFound(name=name, path=self.config_path) diff --git a/contrib/openstack-config b/contrib/openstack-config new file mode 100755 index 00000000000..d7979f7ffea --- /dev/null +++ b/contrib/openstack-config @@ -0,0 +1,65 @@ +#!/bin/sh -e +### BEGIN INIT INFO +# Provides: openstack +# Required-Start: mountkernfs $local_fs +# Required-Stop: $local_fs +# X-Start-Before: networking +# Should-Start: +# Default-Start: S +# Default-Stop: +# Short-Description: Apply configuration from OpenStack Config Drive +### END INIT INFO + +PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin" + +. /lib/lsb/init-functions + +copy_cloud_config() { + LABEL="config" + if [ ! -e /dev/disk/by-label/${LABEL} ]; then + log_warning_msg "OpenStack Cloud Config drive not found" + return 1 + fi + + MNT=/tmp/config + mkdir -p ${MNT} + mount /dev/disk/by-label/${LABEL} ${MNT} + if [ -e ${MNT}/root/.ssh/authorized_keys ]; then + mkdir -m 700 -p /root/.ssh/ + cp ${MNT}/root/.ssh/authorized_keys /root/.ssh/ + chmod 600 ${MNT}/root/.ssh/authorized_keys + fi + if [ -e ${MNT}/etc/network/interfaces ]; then + cp ${MNT}/etc/network/interfaces /etc/network/ + chmod 644 /etc/network/interfaces + fi + umount ${MNT} + return 0 +} + +case "$1" in + start|"") + log_action_begin_msg "Applying OpenStack Cloud Config" + if copy_cloud_config; then + log_action_end_msg $? + else + log_action_end_msg $? + fi + ;; + + restart|reload|force-reload|status) + echo "Error: argument '$1' not supported" >&2 + exit 3 + ;; + + stop) + # No-op + ;; + + *) + echo "Usage: openstack.sh [start|stop]" >&2 + exit 3 + ;; +esac + +: diff --git a/contrib/redhat-eventlet.patch b/contrib/redhat-eventlet.patch new file mode 100644 index 00000000000..0b77e6f72cc --- /dev/null +++ b/contrib/redhat-eventlet.patch @@ -0,0 +1,16 @@ +--- .cinder-venv/lib/python2.6/site-packages/eventlet/green/subprocess.py.orig +2011-05-25 +23:31:34.597271402 +0000 ++++ .cinder-venv/lib/python2.6/site-packages/eventlet/green/subprocess.py +2011-05-25 +23:33:24.055602468 +0000 +@@ -32,7 +32,7 @@ + setattr(self, attr, wrapped_pipe) + __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__ + +- def wait(self, check_interval=0.01): ++ def wait(self, check_interval=0.01, timeout=None): + # Instead of a blocking OS call, this version of wait() uses logic + # borrowed from the eventlet 0.2 processes.Process.wait() method. + try: + diff --git a/doc/.gitignore b/doc/.gitignore new file mode 100644 index 00000000000..291b04e45dd --- /dev/null +++ b/doc/.gitignore @@ -0,0 +1,3 @@ +_build/* +source/api/* +.autogenerated diff --git a/doc/Makefile b/doc/Makefile new file mode 100644 index 00000000000..ba789b5df85 --- /dev/null +++ b/doc/Makefile @@ -0,0 +1,97 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXSOURCE = source +PAPER = +BUILDDIR = build + +# Internal variables. 
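+# PAPEROPT_* select the LaTeX paper size; ALLSPHINXOPTS combines the doctree
+# cache directory, the chosen paper size and any user-supplied SPHINXOPTS.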
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SPHINXSOURCE) + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +.DEFAULT_GOAL = html + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + -rm -rf cinder.sqlite + if [ -f .autogenerated ] ; then \ + cat .autogenerated | xargs rm ; \ + rm .autogenerated ; \ + fi + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cinder.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cinder.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/doc/README.rst b/doc/README.rst new file mode 100644 index 00000000000..835dbc5d9f6 --- /dev/null +++ b/doc/README.rst @@ -0,0 +1,55 @@ +================= +Building the docs +================= + +Dependencies +============ + +Sphinx_ + You'll need sphinx (the python one) and if you are + using the virtualenv you'll need to install it in the virtualenv + specifically so that it can load the cinder modules. 
+ + :: + + pip install Sphinx + +Graphviz_ + Some of the diagrams are generated using the ``dot`` language + from Graphviz. + + :: + + sudo apt-get install graphviz + +.. _Sphinx: http://sphinx.pocoo.org + +.. _Graphviz: http://www.graphviz.org/ + + +Use `make` +========== + +Just type make:: + + % make + +Look in the Makefile for more targets. + + +Manually +======== + + 1. Generate the code.rst file so that Sphinx will pull in our docstrings:: + + % ./generate_autodoc_index.sh > source/code.rst + + 2. Run `sphinx_build`:: + + % sphinx-build -b html source build/html + + +The docs have been built +======================== + +Check out the `build` directory to find them. Yay! diff --git a/doc/ext/__init__.py b/doc/ext/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/doc/ext/nova_autodoc.py b/doc/ext/nova_autodoc.py new file mode 100644 index 00000000000..a778f4a522c --- /dev/null +++ b/doc/ext/nova_autodoc.py @@ -0,0 +1,12 @@ +import gettext +import os + +gettext.install('cinder') + +from cinder import utils + + +def setup(app): + print "**Autodocumenting from %s" % os.path.abspath(os.curdir) + rv = utils.execute('./doc/generate_autodoc_index.sh') + print rv[0] diff --git a/doc/ext/nova_todo.py b/doc/ext/nova_todo.py new file mode 100644 index 00000000000..2bd65207114 --- /dev/null +++ b/doc/ext/nova_todo.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# This is a hack of the builtin todo extension, to make the todo_list more user friendly + +from sphinx.ext.todo import * +import re + +def _(s): + return s + + +def process_todo_nodes(app, doctree, fromdocname): + if not app.config['todo_include_todos']: + for node in doctree.traverse(todo_node): + node.parent.remove(node) + + # Replace all todolist nodes with a list of the collected todos. + # Augment each todo with a backlink to the original location. + env = app.builder.env + + if not hasattr(env, 'todo_all_todos'): + env.todo_all_todos = [] + + + # remove the item that was added in the constructor, since I'm tired of + # reading through docutils for the proper way to construct an empty list + lists = [] + for i in xrange(5): + lists.append(nodes.bullet_list("", nodes.Text('',''))) + lists[i].remove(lists[i][0]) + lists[i]['classes'].append('todo_list') + + for node in doctree.traverse(todolist): + if not app.config['todo_include_todos']: + node.replace_self([]) + continue + + for todo_info in env.todo_all_todos: + para = nodes.paragraph() + filename = env.doc2path(todo_info['docname'], base=None) + + # Create a reference + newnode = nodes.reference('', '') + + line_info = todo_info['lineno'] + link = _('%(filename)s, line %(line_info)d') % locals() + innernode = nodes.emphasis(link, link) + newnode['refdocname'] = todo_info['docname'] + + try: + newnode['refuri'] = app.builder.get_relative_uri( + fromdocname, todo_info['docname']) + newnode['refuri'] += '#' + todo_info['target']['refid'] + except NoUri: + # ignore if no URI can be determined, e.g. 
for LaTeX output + pass + + newnode.append(innernode) + para += newnode + para['classes'].append('todo_link') + + todo_entry = todo_info['todo'] + + env.resolve_references(todo_entry, todo_info['docname'], app.builder) + + item = nodes.list_item('', para) + todo_entry[1]['classes'].append('details') + + comment = todo_entry[1] + + m = re.match(r"^P(\d)", comment.astext()) + priority = 5 + if m: + priority = int(m.group(1)) + if (priority < 0): priority = 1 + if (priority > 5): priority = 5 + + item['classes'].append('todo_p' + str(priority)) + todo_entry['classes'].append('todo_p' + str(priority)) + + item.append(comment) + + lists[priority-1].insert(0, item) + + + node.replace_self(lists) + +def setup(app): + app.add_config_value('todo_include_todos', False, False) + + app.add_node(todolist) + app.add_node(todo_node, + html=(visit_todo_node, depart_todo_node), + latex=(visit_todo_node, depart_todo_node), + text=(visit_todo_node, depart_todo_node)) + + app.add_directive('todo', Todo) + app.add_directive('todolist', TodoList) + app.connect('doctree-read', process_todos) + app.connect('doctree-resolved', process_todo_nodes) + app.connect('env-purge-doc', purge_todos) + diff --git a/doc/find_autodoc_modules.sh b/doc/find_autodoc_modules.sh new file mode 100755 index 00000000000..fb7e451a08c --- /dev/null +++ b/doc/find_autodoc_modules.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +CINDER_DIR='cinder/' # include trailing slash +DOCS_DIR='source' + +modules='' +for x in `find ${CINDER_DIR} -name '*.py' | grep -v cinder/tests`; do + if [ `basename ${x} .py` == "__init__" ] ; then + continue + fi + relative=cinder.`echo ${x} | sed -e 's$^'${CINDER_DIR}'$$' -e 's/.py$//' -e 's$/$.$g'` + modules="${modules} ${relative}" +done + +for mod in ${modules} ; do + if [ ! -f "${DOCS_DIR}/${mod}.rst" ]; + then + echo ${mod} + fi +done diff --git a/doc/generate_autodoc_index.sh b/doc/generate_autodoc_index.sh new file mode 100755 index 00000000000..bdfa73a4966 --- /dev/null +++ b/doc/generate_autodoc_index.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +SOURCEDIR=doc/source/api + +if [ ! -d ${SOURCEDIR} ] ; then + mkdir -p ${SOURCEDIR} +fi + +for x in `./doc/find_autodoc_modules.sh`; +do + echo "Generating ${SOURCEDIR}/${x}.rst" + echo "${SOURCEDIR}/${x}.rst" >> .autogenerated + heading="The :mod:\`${x}\` Module" + # Figure out how long the heading is + # and make sure to emit that many '=' under + # it to avoid heading format errors + # in Sphinx. + heading_len=$(echo "$heading" | wc -c) + underline=$(head -c $heading_len < /dev/zero | tr '\0' '=') + ( cat < ${SOURCEDIR}/${x}.rst + +done + +if [ ! 
-f ${SOURCEDIR}/autoindex.rst ] ; then + + cat > ${SOURCEDIR}/autoindex.rst <> ${SOURCEDIR}/autoindex.rst + done + + echo ${SOURCEDIR}/autoindex.rst >> .autogenerated +fi diff --git a/doc/source/_ga/layout.html b/doc/source/_ga/layout.html new file mode 100644 index 00000000000..f29e9096854 --- /dev/null +++ b/doc/source/_ga/layout.html @@ -0,0 +1,17 @@ +{% extends "!layout.html" %} + +{% block footer %} +{{ super() }} + + +{% endblock %} + diff --git a/doc/source/_static/.gitignore b/doc/source/_static/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/doc/source/_static/.placeholder b/doc/source/_static/.placeholder new file mode 100644 index 00000000000..e69de29bb2d diff --git a/doc/source/_static/basic.css b/doc/source/_static/basic.css new file mode 100644 index 00000000000..d909ce37c74 --- /dev/null +++ b/doc/source/_static/basic.css @@ -0,0 +1,416 @@ +/** + * Sphinx stylesheet -- basic theme + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +img { + border: 0; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable dl, table.indextable dd { + margin-top: 0; + margin-bottom: 0; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +/* -- general body styles --------------------------------------------------- */ + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > 
diff --git a/doc/source/_ga/layout.html b/doc/source/_ga/layout.html
new file mode 100644
index 00000000000..f29e9096854
--- /dev/null
+++ b/doc/source/_ga/layout.html
@@ -0,0 +1,17 @@
+{% extends "!layout.html" %}
+
+{% block footer %}
+{{ super() }}
+
+
+{% endblock %}
+
diff --git a/doc/source/_static/.gitignore b/doc/source/_static/.gitignore
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/doc/source/_static/.placeholder b/doc/source/_static/.placeholder
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/doc/source/_static/basic.css b/doc/source/_static/basic.css
new file mode 100644
index 00000000000..d909ce37c74
--- /dev/null
+++ b/doc/source/_static/basic.css
@@ -0,0 +1,416 @@
+/**
+ * Sphinx stylesheet -- basic theme
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+/* -- main layout ----------------------------------------------------------- */
+
+div.clearer {
+    clear: both;
+}
+
+/* -- relbar ---------------------------------------------------------------- */
+
+div.related {
+    width: 100%;
+    font-size: 90%;
+}
+
+div.related h3 {
+    display: none;
+}
+
+div.related ul {
+    margin: 0;
+    padding: 0 0 0 10px;
+    list-style: none;
+}
+
+div.related li {
+    display: inline;
+}
+
+div.related li.right {
+    float: right;
+    margin-right: 5px;
+}
+
+/* -- sidebar --------------------------------------------------------------- */
+
+div.sphinxsidebarwrapper {
+    padding: 10px 5px 0 10px;
+}
+
+div.sphinxsidebar {
+    float: left;
+    width: 230px;
+    margin-left: -100%;
+    font-size: 90%;
+}
+
+div.sphinxsidebar ul {
+    list-style: none;
+}
+
+div.sphinxsidebar ul ul,
+div.sphinxsidebar ul.want-points {
+    margin-left: 20px;
+    list-style: square;
+}
+
+div.sphinxsidebar ul ul {
+    margin-top: 0;
+    margin-bottom: 0;
+}
+
+div.sphinxsidebar form {
+    margin-top: 10px;
+}
+
+div.sphinxsidebar input {
+    border: 1px solid #98dbcc;
+    font-family: sans-serif;
+    font-size: 1em;
+}
+
+img {
+    border: 0;
+}
+
+/* -- search page ----------------------------------------------------------- */
+
+ul.search {
+    margin: 10px 0 0 20px;
+    padding: 0;
+}
+
+ul.search li {
+    padding: 5px 0 5px 20px;
+    background-image: url(file.png);
+    background-repeat: no-repeat;
+    background-position: 0 7px;
+}
+
+ul.search li a {
+    font-weight: bold;
+}
+
+ul.search li div.context {
+    color: #888;
+    margin: 2px 0 0 30px;
+    text-align: left;
+}
+
+ul.keywordmatches li.goodmatch a {
+    font-weight: bold;
+}
+
+/* -- index page ------------------------------------------------------------ */
+
+table.contentstable {
+    width: 90%;
+}
+
+table.contentstable p.biglink {
+    line-height: 150%;
+}
+
+a.biglink {
+    font-size: 1.3em;
+}
+
+span.linkdescr {
+    font-style: italic;
+    padding-top: 5px;
+    font-size: 90%;
+}
+
+/* -- general index --------------------------------------------------------- */
+
+table.indextable td {
+    text-align: left;
+    vertical-align: top;
+}
+
+table.indextable dl, table.indextable dd {
+    margin-top: 0;
+    margin-bottom: 0;
+}
+
+table.indextable tr.pcap {
+    height: 10px;
+}
+
+table.indextable tr.cap {
+    margin-top: 10px;
+    background-color: #f2f2f2;
+}
+
+img.toggler {
+    margin-right: 3px;
+    margin-top: 3px;
+    cursor: pointer;
+}
+
+/* -- general body styles --------------------------------------------------- */
+
+a.headerlink {
+    visibility: hidden;
+}
+
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink {
+    visibility: visible;
+}
+
+div.body p.caption {
+    text-align: inherit;
+}
+
+div.body td {
+    text-align: left;
+}
+
+.field-list ul {
+    padding-left: 1em;
+}
+
+.first {
+}
+
+p.rubric {
+    margin-top: 30px;
+    font-weight: bold;
+}
+
+/* -- sidebars -------------------------------------------------------------- */
+
+div.sidebar {
+    margin: 0 0 0.5em 1em;
+    border: 1px solid #ddb;
+    padding: 7px 7px 0 7px;
+    background-color: #ffe;
+    width: 40%;
+    float: right;
+}
+
+p.sidebar-title {
+    font-weight: bold;
+}
+
+/* -- topics ---------------------------------------------------------------- */
+
+div.topic {
+    border: 1px solid #ccc;
+    padding: 7px 7px 0 7px;
+    margin: 10px 0 10px 0;
+}
+
+p.topic-title {
+    font-size: 1.1em;
+    font-weight: bold;
+    margin-top: 10px;
+}
+
+/* -- admonitions ----------------------------------------------------------- */
+
+div.admonition {
+    margin-top: 10px;
+    margin-bottom: 10px;
+    padding: 7px;
+}
+
+div.admonition dt {
+    font-weight: bold;
+}
+
+div.admonition dl {
+    margin-bottom: 0;
+}
+
+p.admonition-title {
+    margin: 0px 10px 5px 0px;
+    font-weight: bold;
+}
+
+div.body p.centered {
+    text-align: center;
+    margin-top: 25px;
+}
+
+/* -- tables ---------------------------------------------------------------- */
+
+table.docutils {
+    border: 0;
+    border-collapse: collapse;
+}
+
+table.docutils td, table.docutils th {
+    padding: 1px 8px 1px 0;
+    border-top: 0;
+    border-left: 0;
+    border-right: 0;
+    border-bottom: 1px solid #aaa;
+}
+
+table.field-list td, table.field-list th {
+    border: 0 !important;
+}
+
+table.footnote td, table.footnote th {
+    border: 0 !important;
+}
+
+th {
+    text-align: left;
+    padding-right: 5px;
+}
+
+/* -- other body styles ----------------------------------------------------- */
+
+dl {
+    margin-bottom: 15px;
+}
+
+dd p {
+    margin-top: 0px;
+}
+
+dd ul, dd table {
+    margin-bottom: 10px;
+}
+
+dd {
+    margin-top: 3px;
+    margin-bottom: 10px;
+    margin-left: 30px;
+}
+
+dt:target, .highlight {
+    background-color: #fbe54e;
+}
+
+dl.glossary dt {
+    font-weight: bold;
+    font-size: 1.1em;
+}
+
+.field-list ul {
+    margin: 0;
+    padding-left: 1em;
+}
+
+.field-list p {
+    margin: 0;
+}
+
+.refcount {
+    color: #060;
+}
+
+.optional {
+    font-size: 1.3em;
+}
+
+.versionmodified {
+    font-style: italic;
+}
+
+.system-message {
+    background-color: #fda;
+    padding: 5px;
+    border: 3px solid red;
+}
+
+.footnote:target {
+    background-color: #ffa
+}
+
+.line-block {
+    display: block;
+    margin-top: 1em;
+    margin-bottom: 1em;
+}
+
+.line-block .line-block {
+    margin-top: 0;
+    margin-bottom: 0;
+    margin-left: 1.5em;
+}
+
+/* -- code displays --------------------------------------------------------- */
+
+pre {
+    overflow: auto;
+}
+
+td.linenos pre {
+    padding: 5px 0px;
+    border: 0;
+    background-color: transparent;
+    color: #aaa;
+}
+
+table.highlighttable {
+    margin-left: 0.5em;
+}
+
+table.highlighttable td {
+    padding: 0 0.5em 0 0.5em;
+}
+
+tt.descname {
+    background-color: transparent;
+    font-weight: bold;
+    font-size: 1.2em;
+}
+
+tt.descclassname {
+    background-color: transparent;
+}
+
+tt.xref, a tt {
+    background-color: transparent;
+    font-weight: bold;
+}
+
+h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
+    background-color: transparent;
+}
+
+/* -- math display ---------------------------------------------------------- */
+
+img.math {
+    vertical-align: middle;
+}
+
+div.body div.math p {
+    text-align: center;
+}
+
+span.eqno {
+    float: right;
+}
+
+/* -- printout stylesheet --------------------------------------------------- */
+
+@media print {
+    div.document,
+    div.documentwrapper,
+    div.bodywrapper {
+        margin: 0 !important;
+        width: 100%;
+    }
+
+    div.sphinxsidebar,
+    div.related,
+    div.footer,
+    #top-link {
+        display: none;
+    }
+}
diff --git a/doc/source/_static/default.css b/doc/source/_static/default.css
new file mode 100644
index 00000000000..c8091ecb4d6
--- /dev/null
+++ b/doc/source/_static/default.css
@@ -0,0 +1,230 @@
+/**
+ * Sphinx stylesheet -- default theme
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+    font-family: sans-serif;
+    font-size: 100%;
+    background-color: #11303d;
+    color: #000;
+    margin: 0;
+    padding: 0;
+}
+
+div.document {
+    background-color: #1c4e63;
+}
+
+div.documentwrapper {
+    float: left;
+    width: 100%;
+}
+
+div.bodywrapper {
+    margin: 0 0 0 230px;
+}
+
+div.body {
+    background-color: #ffffff;
+    color: #000000;
+    padding: 0 20px 30px 20px;
+}
+
+div.footer {
+    color: #ffffff;
+    width: 100%;
+    padding: 9px 0 9px 0;
+    text-align: center;
+    font-size: 75%;
+}
+
+div.footer a {
+    color: #ffffff;
+    text-decoration: underline;
+}
+
+div.related {
+    background-color: #133f52;
+    line-height: 30px;
+    color: #ffffff;
+}
+
+div.related a {
+    color: #ffffff;
+}
+
+div.sphinxsidebar {
+}
+
+div.sphinxsidebar h3 {
+    font-family: 'Trebuchet MS', sans-serif;
+    color: #ffffff;
+    font-size: 1.4em;
+    font-weight: normal;
+    margin: 0;
+    padding: 0;
+}
+
+div.sphinxsidebar h3 a {
+    color: #ffffff;
+}
+
+div.sphinxsidebar h4 {
+    font-family: 'Trebuchet MS', sans-serif;
+    color: #ffffff;
+    font-size: 1.3em;
+    font-weight: normal;
+    margin: 5px 0 0 0;
+    padding: 0;
+}
+
+div.sphinxsidebar p {
+    color: #ffffff;
+}
+
+div.sphinxsidebar p.topless {
+    margin: 5px 10px 10px 10px;
+}
+
+div.sphinxsidebar ul {
+    margin: 10px;
+    padding: 0;
+    color: #ffffff;
+}
+
+div.sphinxsidebar a {
+    color: #98dbcc;
+}
+
+div.sphinxsidebar input {
+    border: 1px solid #98dbcc;
+    font-family: sans-serif;
+    font-size: 1em;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+a {
+    color: #355f7c;
+    text-decoration: none;
+}
+
+a:hover {
+    text-decoration: underline;
+}
+
+div.body p, div.body dd, div.body li {
+    text-align: left;
+    line-height: 130%;
+}
+
+div.body h1,
+div.body h2,
+div.body h3,
+div.body h4,
+div.body h5,
+div.body h6 {
+    font-family: 'Trebuchet MS', sans-serif;
+    background-color: #f2f2f2;
+    font-weight: normal;
+    color: #20435c;
+    border-bottom: 1px solid #ccc;
+    margin: 20px -20px 10px -20px;
+    padding: 3px 0 3px 10px;
+}
+
+div.body h1 { margin-top: 0; font-size: 200%; }
+div.body h2 { font-size: 160%; }
+div.body h3 { font-size: 140%; }
+div.body h4 { font-size: 120%; }
+div.body h5 { font-size: 110%; }
+div.body h6 { font-size: 100%; }
+
+a.headerlink {
+    color: #c60f0f;
+    font-size: 0.8em;
+    padding: 0 4px 0 4px;
+    text-decoration: none;
+}
+
+a.headerlink:hover {
+    background-color: #c60f0f;
+    color: white;
+}
+
+div.body p, div.body dd, div.body li {
+    text-align: left;
+    line-height: 130%;
+}
+
+div.admonition p.admonition-title + p {
+    display: inline;
+}
+
+div.admonition p {
+    margin-bottom: 5px;
+}
+
+div.admonition pre {
+    margin-bottom: 5px;
+}
+
+div.admonition ul, div.admonition ol {
+    margin-bottom: 5px;
+}
+
+div.note {
+    background-color: #eee;
+    border: 1px solid #ccc;
+}
+
+div.seealso {
+    background-color: #ffc;
+    border: 1px solid #ff6;
+}
+
+div.topic {
+    background-color: #eee;
+}
+
+div.warning {
+    background-color: #ffe4e4;
+    border: 1px solid #f66;
+}
+
+p.admonition-title {
+    display: inline;
+}
+
+p.admonition-title:after {
+    content: ":";
+}
+
+pre {
+    padding: 5px;
+    background-color: #eeffcc;
+    color: #333333;
+    line-height: 120%;
+    border: 1px solid #ac9;
+    border-left: none;
+    border-right: none;
+}
+
+tt {
+    background-color: #ecf0f3;
+    padding: 0 1px 0 1px;
+    font-size: 0.95em;
+}
+
+.warning tt {
+    background: #efc2c2;
+}
+
+.note tt {
+    background: #d6d6d6;
+}
diff --git a/doc/source/_static/jquery.tweet.js b/doc/source/_static/jquery.tweet.js
new file mode 100644
index 00000000000..79bf0bdb4cf
--- /dev/null
+++ b/doc/source/_static/jquery.tweet.js
@@ -0,0 +1,154 @@
+(function($) {
+
+  $.fn.tweet = function(o){
+    var s = {
+      username: ["seaofclouds"],              // [string]   required, unless you want to display our tweets. :) it can be an array, just do ["username1","username2","etc"]
+      list: null,                             // [string]   optional name of list belonging to username
+      avatar_size: null,                      // [integer]  height and width of avatar if displayed (48px max)
+      count: 3,                               // [integer]  how many tweets to display?
+      intro_text: null,                       // [string]   do you want text BEFORE your your tweets?
+      outro_text: null,                       // [string]   do you want text AFTER your tweets?
+      join_text: null,                        // [string]   optional text in between date and tweet, try setting to "auto"
+      auto_join_text_default: "i said,",      // [string]   auto text for non verb: "i said" bullocks
+      auto_join_text_ed: "i",                 // [string]   auto text for past tense: "i" surfed
+      auto_join_text_ing: "i am",             // [string]   auto tense for present tense: "i was" surfing
+      auto_join_text_reply: "i replied to",   // [string]   auto tense for replies: "i replied to" @someone "with"
+      auto_join_text_url: "i was looking at", // [string]   auto tense for urls: "i was looking at" http:...
+      loading_text: null,                     // [string]   optional loading text, displayed while tweets load
+      query: null                             // [string]   optional search query
+    };
+
+    if(o) $.extend(s, o);
+
+    $.fn.extend({
+      linkUrl: function() {
+        var returning = [];
+        var regexp = /((ftp|http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?)/gi;
+        this.each(function() {
+          returning.push(this.replace(regexp,"<a href=\"$1\">$1</a>"));
+        });
+        return $(returning);
+      },
+      linkUser: function() {
+        var returning = [];
+        var regexp = /[\@]+([A-Za-z0-9-_]+)/gi;
+        this.each(function() {
+          returning.push(this.replace(regexp,"<a href=\"http://twitter.com/$1\">@$1</a>"));
+        });
+        return $(returning);
+      },
+      linkHash: function() {
+        var returning = [];
+        var regexp = / [\#]+([A-Za-z0-9-_]+)/gi;
+        this.each(function() {
+          returning.push(this.replace(regexp, ' <a href="http://search.twitter.com/search?q=&tag=$1&lang=all&from='+s.username.join("%2BOR%2B")+'" class="tweet_hashtag">#$1</a>'));
+        });
+        return $(returning);
+      },
+      capAwesome: function() {
+        var returning = [];
+        this.each(function() {
+          returning.push(this.replace(/\b(awesome)\b/gi, '<span class="awesome">$1</span>'));
+        });
+        return $(returning);
+      },
+      capEpic: function() {
+        var returning = [];
+        this.each(function() {
+          returning.push(this.replace(/\b(epic)\b/gi, '<span class="epic">$1</span>'));
+        });
+        return $(returning);
+      },
+      makeHeart: function() {
+        var returning = [];
+        this.each(function() {
+          returning.push(this.replace(/(<)+[3]/gi, "<tt class='heart'>&#x2665;</tt>"));
+        });
+        return $(returning);
+      }
+    });
+
+    function relative_time(time_value) {
+      var parsed_date = Date.parse(time_value);
+      var relative_to = (arguments.length > 1) ? arguments[1] : new Date();
+      var delta = parseInt((relative_to.getTime() - parsed_date) / 1000);
+      var pluralize = function (singular, n) {
+        return '' + n + ' ' + singular + (n == 1 ? '' : 's');
+      };
+      if(delta < 60) {
+        return 'less than a minute ago';
+      } else if(delta < (45*60)) {
+        return 'about ' + pluralize("minute", parseInt(delta / 60)) + ' ago';
+      } else if(delta < (24*60*60)) {
+        return 'about ' + pluralize("hour", parseInt(delta / 3600)) + ' ago';
+      } else {
+        return 'about ' + pluralize("day", parseInt(delta / 86400)) + ' ago';
+      }
+    }
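relative_time() buckets a tweet's age into minute, hour, and day ranges and pluralizes the unit as it goes. The same logic in Python, with the thresholds copied from the function above (a sketch for clarity, not part of the patch):

    # Mirror of relative_time()'s bucketing; delta is an age in seconds.
    def relative_time(delta):
        def pluralize(singular, n):
            return '%d %s%s' % (n, singular, '' if n == 1 else 's')
        if delta < 60:
            return 'less than a minute ago'
        elif delta < 45 * 60:
            return 'about %s ago' % pluralize('minute', delta // 60)
        elif delta < 24 * 60 * 60:
            return 'about %s ago' % pluralize('hour', delta // 3600)
        else:
            return 'about %s ago' % pluralize('day', delta // 86400)

    print relative_time(7200)  # -> about 2 hours ago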
+
+    function build_url() {
+      var proto = ('https:' == document.location.protocol ? 'https:' : 'http:');
+      if (s.list) {
+        return proto+"//api.twitter.com/1/"+s.username[0]+"/lists/"+s.list+"/statuses.json?per_page="+s.count+"&callback=?";
+      } else if (s.query == null && s.username.length == 1) {
+        return proto+'//twitter.com/status/user_timeline/'+s.username[0]+'.json?count='+s.count+'&callback=?';
+      } else {
+        var query = (s.query || 'from:'+s.username.join('%20OR%20from:'));
+        return proto+'//search.twitter.com/search.json?&q='+query+'&rpp='+s.count+'&callback=?';
+      }
+    }
+
+    return this.each(function(){
+      var list = $('